/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
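
/* Example (illustrative only, kept out of the build): parity_table maps
   the low byte of a result to CC_P when that byte has an even number of
   set bits, so PF can be derived with a single table lookup. */
#if 0
static int example_pf(uint32_t result)
{
    /* PF is defined on the low 8 bits of the result only */
    return parity_table[(uint8_t)result]; /* CC_P or 0 */
}
#endif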

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
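
/* Example (illustrative only, kept out of the build): RCL/RCR rotate
   through CF, i.e. through 9 bits for a byte operand and 17 bits for a
   word operand, so a count already masked to 5 bits reduces modulo 9
   or 17; the tables above avoid the division. */
#if 0
static int example_rcl_counts(int count)
{
    /* rclb_table[count & 0x1f] == (count & 0x1f) % 9
       rclw_table[count & 0x1f] == (count & 0x1f) % 17 */
    return rclb_table[count & 0x1f];
}
#endif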

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
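
/* Example (illustrative only, kept out of the build): decoding the
   classic flat 4 GB descriptor 0x00cf9a000000ffff with the helpers
   above, where e1 is the low descriptor word and e2 the high one. */
#if 0
static void example_decode_flat_descriptor(SegmentCache *sc)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
    load_seg_cache_raw_dt(sc, e1, e2);
    /* sc->base == 0x00000000 and sc->limit == 0xffffffff: the G bit
       expands the page-granular limit 0xfffff to
       (0xfffff << 12) | 0xfff */
}
#endif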

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task's
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
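
/* Summary (added for reference, derived from the accesses above): the
   32-bit TSS fields touched by switch_tss() are 0x00 back link, 0x1c CR3,
   0x20 EIP, 0x24 EFLAGS, 0x28 EAX..EDI (8 x 4 bytes), 0x48 ES..GS
   (6 x 4 bytes), 0x60 LDT selector and 0x64 trap word; the 16-bit TSS
   uses the 0x0e/0x10/0x12/0x22/0x2a offsets instead. */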

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
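
/* Example (illustrative only, kept out of the build): the I/O
   permission bitmap arithmetic done by check_io() for a concrete
   port. */
#if 0
static void example_check_io_math(void)
{
    int addr = 0x3f9, size = 2;   /* 16-bit access to port 0x3f9 */
    int byte_offset = addr >> 3;  /* 0x7f into the bitmap */
    int bit = addr & 7;           /* 1 */
    int mask = (1 << size) - 1;   /* 0x3: covers ports 0x3f9-0x3fa */
    /* check_io() loads 16 bits at io_base + byte_offset and raises
       #GP(0) unless ((val >> bit) & mask) == 0 */
}
#endif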

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
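
/* Example (illustrative only, kept out of the build): intended use of
   the stack macros above.  The local copy of SP wraps inside the
   segment via sp_mask while keeping its high bits, and SET_ESP()
   writes only the masked part back. */
#if 0
static void example_pushw_popw(void)
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp = ESP, sp_mask = get_sp_mask(env->segs[R_SS].flags);
    uint32_t val;

    PUSHW(ssp, sp, sp_mask, 0x1234);
    POPW(ssp, sp, sp_mask, val); /* val == 0x1234, sp back to ESP */
    SET_ESP(sp, sp_mask);
}
#endif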

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
822

    
823
#ifdef TARGET_X86_64
824

    
825
#define PUSHQ(sp, val)\
826
{\
827
    sp -= 8;\
828
    stq_kernel(sp, (val));\
829
}
830

    
831
#define POPQ(sp, val)\
832
{\
833
    val = ldq_kernel(sp);\
834
    sp += 8;\
835
}
836

    
837
static inline target_ulong get_rsp_from_tss(int level)
838
{
839
    int index;
840

    
841
#if 0
842
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
843
           env->tr.base, env->tr.limit);
844
#endif
845

    
846
    if (!(env->tr.flags & DESC_P_MASK))
847
        cpu_abort(env, "invalid tss");
848
    index = 8 * level + 4;
849
    if ((index + 7) > env->tr.limit)
850
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
851
    return ldq_kernel(env->tr.base + index);
852
}
853

    
854
/* 64 bit interrupt */
855
static void do_interrupt64(int intno, int is_int, int error_code,
856
                           target_ulong next_eip, int is_hw)
857
{
858
    SegmentCache *dt;
859
    target_ulong ptr;
860
    int type, dpl, selector, cpl, ist;
861
    int has_error_code, new_stack;
862
    uint32_t e1, e2, e3, ss;
863
    target_ulong old_eip, esp, offset;
864

    
865
    has_error_code = 0;
866
    if (!is_int && !is_hw) {
867
        switch(intno) {
868
        case 8:
869
        case 10:
870
        case 11:
871
        case 12:
872
        case 13:
873
        case 14:
874
        case 17:
875
            has_error_code = 1;
876
            break;
877
        }
878
    }
879
    if (is_int)
880
        old_eip = next_eip;
881
    else
882
        old_eip = env->eip;
883

    
884
    dt = &env->idt;
885
    if (intno * 16 + 15 > dt->limit)
886
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
887
    ptr = dt->base + intno * 16;
888
    e1 = ldl_kernel(ptr);
889
    e2 = ldl_kernel(ptr + 4);
890
    e3 = ldl_kernel(ptr + 8);
891
    /* check gate type */
892
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
893
    switch(type) {
894
    case 14: /* 386 interrupt gate */
895
    case 15: /* 386 trap gate */
896
        break;
897
    default:
898
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
899
        break;
900
    }
901
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
902
    cpl = env->hflags & HF_CPL_MASK;
903
    /* check privilege if software int */
904
    if (is_int && dpl < cpl)
905
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
906
    /* check valid bit */
907
    if (!(e2 & DESC_P_MASK))
908
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
909
    selector = e1 >> 16;
910
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
911
    ist = e2 & 7;
912
    if ((selector & 0xfffc) == 0)
913
        raise_exception_err(EXCP0D_GPF, 0);
914

    
915
    if (load_segment(&e1, &e2, selector) != 0)
916
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
918
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
919
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
920
    if (dpl > cpl)
921
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
922
    if (!(e2 & DESC_P_MASK))
923
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
924
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
925
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
927
        /* to inner privilege */
928
        if (ist != 0)
929
            esp = get_rsp_from_tss(ist + 3);
930
        else
931
            esp = get_rsp_from_tss(dpl);
932
        esp &= ~0xfLL; /* align stack */
933
        ss = 0;
934
        new_stack = 1;
935
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
936
        /* to same privilege */
937
        if (env->eflags & VM_MASK)
938
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939
        new_stack = 0;
940
        if (ist != 0)
941
            esp = get_rsp_from_tss(ist + 3);
942
        else
943
            esp = ESP;
944
        esp &= ~0xfLL; /* align stack */
945
        dpl = cpl;
946
    } else {
947
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948
        new_stack = 0; /* avoid warning */
949
        esp = 0; /* avoid warning */
950
    }
951

    
952
    PUSHQ(esp, env->segs[R_SS].selector);
953
    PUSHQ(esp, ESP);
954
    PUSHQ(esp, compute_eflags());
955
    PUSHQ(esp, env->segs[R_CS].selector);
956
    PUSHQ(esp, old_eip);
957
    if (has_error_code) {
958
        PUSHQ(esp, error_code);
959
    }
960

    
961
    if (new_stack) {
962
        ss = 0 | dpl;
963
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
964
    }
965
    ESP = esp;
966

    
967
    selector = (selector & ~3) | dpl;
968
    cpu_x86_load_seg_cache(env, R_CS, selector,
969
                   get_seg_base(e1, e2),
970
                   get_seg_limit(e1, e2),
971
                   e2);
972
    cpu_x86_set_cpl(env, dpl);
973
    env->eip = offset;
974

    
975
    /* interrupt gate clear IF mask */
976
    if ((type & 1) == 0) {
977
        env->eflags &= ~IF_MASK;
978
    }
979
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
980
}
981
#endif
982

    
983
#if defined(CONFIG_USER_ONLY)
984
void helper_syscall(int next_eip_addend)
985
{
986
    env->exception_index = EXCP_SYSCALL;
987
    env->exception_next_eip = env->eip + next_eip_addend;
988
    cpu_loop_exit();
989
}
990
#else
991
void helper_syscall(int next_eip_addend)
992
{
993
    int selector;
994

    
995
    if (!(env->efer & MSR_EFER_SCE)) {
996
        raise_exception_err(EXCP06_ILLOP, 0);
997
    }
998
    selector = (env->star >> 32) & 0xffff;
999
#ifdef TARGET_X86_64
1000
    if (env->hflags & HF_LMA_MASK) {
1001
        int code64;
1002

    
1003
        ECX = env->eip + next_eip_addend;
1004
        env->regs[11] = compute_eflags();
1005

    
1006
        code64 = env->hflags & HF_CS64_MASK;
1007

    
1008
        cpu_x86_set_cpl(env, 0);
1009
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1010
                           0, 0xffffffff,
1011
                               DESC_G_MASK | DESC_P_MASK |
1012
                               DESC_S_MASK |
1013
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1014
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1015
                               0, 0xffffffff,
1016
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1017
                               DESC_S_MASK |
1018
                               DESC_W_MASK | DESC_A_MASK);
1019
        env->eflags &= ~env->fmask;
1020
        load_eflags(env->eflags, 0);
1021
        if (code64)
1022
            env->eip = env->lstar;
1023
        else
1024
            env->eip = env->cstar;
1025
    } else
1026
#endif
1027
    {
1028
        ECX = (uint32_t)(env->eip + next_eip_addend);
1029

    
1030
        cpu_x86_set_cpl(env, 0);
1031
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1032
                           0, 0xffffffff,
1033
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1034
                               DESC_S_MASK |
1035
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1036
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1037
                               0, 0xffffffff,
1038
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1039
                               DESC_S_MASK |
1040
                               DESC_W_MASK | DESC_A_MASK);
1041
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1042
        env->eip = (uint32_t)env->star;
1043
    }
1044
}
1045
#endif
1046

    
1047
void helper_sysret(int dflag)
1048
{
1049
    int cpl, selector;
1050

    
1051
    if (!(env->efer & MSR_EFER_SCE)) {
1052
        raise_exception_err(EXCP06_ILLOP, 0);
1053
    }
1054
    cpl = env->hflags & HF_CPL_MASK;
1055
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1056
        raise_exception_err(EXCP0D_GPF, 0);
1057
    }
1058
    selector = (env->star >> 48) & 0xffff;
1059
#ifdef TARGET_X86_64
1060
    if (env->hflags & HF_LMA_MASK) {
1061
        if (dflag == 2) {
1062
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1063
                                   0, 0xffffffff,
1064
                                   DESC_G_MASK | DESC_P_MASK |
1065
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1067
                                   DESC_L_MASK);
1068
            env->eip = ECX;
1069
        } else {
1070
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1071
                                   0, 0xffffffff,
1072
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1075
            env->eip = (uint32_t)ECX;
1076
        }
1077
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1078
                               0, 0xffffffff,
1079
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081
                               DESC_W_MASK | DESC_A_MASK);
1082
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1083
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1084
        cpu_x86_set_cpl(env, 3);
1085
    } else
1086
#endif
1087
    {
1088
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1089
                               0, 0xffffffff,
1090
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1091
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1092
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1093
        env->eip = (uint32_t)ECX;
1094
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1095
                               0, 0xffffffff,
1096
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1097
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1098
                               DESC_W_MASK | DESC_A_MASK);
1099
        env->eflags |= IF_MASK;
1100
        cpu_x86_set_cpl(env, 3);
1101
    }
1102
#ifdef USE_KQEMU
1103
    if (kqemu_is_ok(env)) {
1104
        if (env->hflags & HF_LMA_MASK)
1105
            CC_OP = CC_OP_EFLAGS;
1106
        env->exception_index = -1;
1107
        cpu_loop_exit();
1108
    }
1109
#endif
1110
}
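
/* Example (illustrative only, kept out of the build): how SYSCALL and
   SYSRET derive selectors from the STAR MSR, matching the shifts used
   above.  The STAR value below is only a made-up layout. */
#if 0
static void example_star_selectors(void)
{
    uint64_t star = 0x0023001000000000ULL;   /* hypothetical value */
    int syscall_cs = (star >> 32) & 0xffff;  /* 0x0010; SS = CS + 8 */
    int sysret_cs = (star >> 48) & 0xffff;   /* 0x0023; SS = CS + 8,
                                                and CS + 16 for a
                                                64-bit SYSRET */
}
#endif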

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
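
/* Summary (added for reference): with the classification above, a
   contributory fault (#DE, #TS, #NP, #SS, #GP) raised while another
   contributory fault is being delivered escalates to #DF, as does a
   #PF raised while delivering a contributory fault or another #PF;
   any further fault during #DF delivery is a triple fault
   (cpu_abort above). */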
1266

    
1267
/*
1268
 * Signal an interruption. It is executed in the main CPU loop.
1269
 * is_int is TRUE if coming from the int instruction. next_eip is the
1270
 * EIP value AFTER the interrupt instruction. It is only relevant if
1271
 * is_int is TRUE.
1272
 */
1273
void raise_interrupt(int intno, int is_int, int error_code,
1274
                     int next_eip_addend)
1275
{
1276
    if (!is_int) {
1277
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1278
        intno = check_exception(intno, &error_code);
1279
    } else {
1280
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1281
    }
1282

    
1283
    env->exception_index = intno;
1284
    env->error_code = error_code;
1285
    env->exception_is_int = is_int;
1286
    env->exception_next_eip = env->eip + next_eip_addend;
1287
    cpu_loop_exit();
1288
}
1289

    
1290
/* shortcuts to generate exceptions */
1291

    
1292
void (raise_exception_err)(int exception_index, int error_code)
1293
{
1294
    raise_interrupt(exception_index, 0, error_code, 0);
1295
}
1296

    
1297
void raise_exception(int exception_index)
1298
{
1299
    raise_interrupt(exception_index, 0, 0, 0);
1300
}
1301

    
1302
/* SMM support */
1303

    
1304
#if defined(CONFIG_USER_ONLY)
1305

    
1306
void do_smm_enter(void)
1307
{
1308
}
1309

    
1310
void helper_rsm(void)
1311
{
1312
}
1313

    
1314
#else
1315

    
1316
#ifdef TARGET_X86_64
1317
#define SMM_REVISION_ID 0x00020064
1318
#else
1319
#define SMM_REVISION_ID 0x00020000
1320
#endif
1321

    
1322
void do_smm_enter(void)
1323
{
1324
    target_ulong sm_state;
1325
    SegmentCache *dt;
1326
    int i, offset;
1327

    
1328
    if (loglevel & CPU_LOG_INT) {
1329
        fprintf(logfile, "SMM: enter\n");
1330
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1331
    }
1332

    
1333
    env->hflags |= HF_SMM_MASK;
1334
    cpu_smm_update(env);
1335

    
1336
    sm_state = env->smbase + 0x8000;
1337

    
1338
#ifdef TARGET_X86_64
1339
    for(i = 0; i < 6; i++) {
1340
        dt = &env->segs[i];
1341
        offset = 0x7e00 + i * 16;
1342
        stw_phys(sm_state + offset, dt->selector);
1343
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1344
        stl_phys(sm_state + offset + 4, dt->limit);
1345
        stq_phys(sm_state + offset + 8, dt->base);
1346
    }
1347

    
1348
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1349
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1350

    
1351
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1352
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1353
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1354
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1355

    
1356
    stq_phys(sm_state + 0x7e88, env->idt.base);
1357
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1358

    
1359
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1360
    stq_phys(sm_state + 0x7e98, env->tr.base);
1361
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1362
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1363

    
1364
    stq_phys(sm_state + 0x7ed0, env->efer);
1365

    
1366
    stq_phys(sm_state + 0x7ff8, EAX);
1367
    stq_phys(sm_state + 0x7ff0, ECX);
1368
    stq_phys(sm_state + 0x7fe8, EDX);
1369
    stq_phys(sm_state + 0x7fe0, EBX);
1370
    stq_phys(sm_state + 0x7fd8, ESP);
1371
    stq_phys(sm_state + 0x7fd0, EBP);
1372
    stq_phys(sm_state + 0x7fc8, ESI);
1373
    stq_phys(sm_state + 0x7fc0, EDI);
1374
    for(i = 8; i < 16; i++)
1375
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1376
    stq_phys(sm_state + 0x7f78, env->eip);
1377
    stl_phys(sm_state + 0x7f70, compute_eflags());
1378
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1379
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1380

    
1381
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1382
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1383
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1384

    
1385
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1386
    stl_phys(sm_state + 0x7f00, env->smbase);
1387
#else
1388
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1389
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1390
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1391
    stl_phys(sm_state + 0x7ff0, env->eip);
1392
    stl_phys(sm_state + 0x7fec, EDI);
1393
    stl_phys(sm_state + 0x7fe8, ESI);
1394
    stl_phys(sm_state + 0x7fe4, EBP);
1395
    stl_phys(sm_state + 0x7fe0, ESP);
1396
    stl_phys(sm_state + 0x7fdc, EBX);
1397
    stl_phys(sm_state + 0x7fd8, EDX);
1398
    stl_phys(sm_state + 0x7fd4, ECX);
1399
    stl_phys(sm_state + 0x7fd0, EAX);
1400
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1401
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1402

    
1403
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1404
    stl_phys(sm_state + 0x7f64, env->tr.base);
1405
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1406
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1407

    
1408
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1409
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1410
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1411
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1412

    
1413
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1414
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1415

    
1416
    stl_phys(sm_state + 0x7f58, env->idt.base);
1417
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1418

    
1419
    for(i = 0; i < 6; i++) {
1420
        dt = &env->segs[i];
1421
        if (i < 3)
1422
            offset = 0x7f84 + i * 12;
1423
        else
1424
            offset = 0x7f2c + (i - 3) * 12;
1425
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1426
        stl_phys(sm_state + offset + 8, dt->base);
1427
        stl_phys(sm_state + offset + 4, dt->limit);
1428
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1429
    }
1430
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1431

    
1432
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1433
    stl_phys(sm_state + 0x7ef8, env->smbase);
1434
#endif
1435
    /* init SMM cpu state */
1436

    
1437
#ifdef TARGET_X86_64
1438
    cpu_load_efer(env, 0);
1439
#endif
1440
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1441
    env->eip = 0x00008000;
1442
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1443
                           0xffffffff, 0);
1444
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1445
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1446
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1447
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1448
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1449

    
1450
    cpu_x86_update_cr0(env,
1451
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1452
    cpu_x86_update_cr4(env, 0);
1453
    env->dr[7] = 0x00000400;
1454
    CC_OP = CC_OP_EFLAGS;
1455
}
1456

    
1457
void helper_rsm(void)
1458
{
1459
    target_ulong sm_state;
1460
    int i, offset;
1461
    uint32_t val;
1462

    
1463
    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
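
/* Worked example (illustrative only, not referenced by the translator):
   for DIV r/m8 with AX = 0x0123 (291) and a divisor of 0x10 (16), the
   helper above computes q = 0x12 and r = 0x03 and repacks them as
   AH = remainder, AL = quotient, so AX becomes 0x0312. */
#if 0
static void example_divb(void)
{
    unsigned int num = 0x0123;  /* dividend taken from AX */
    unsigned int den = 0x10;    /* divisor operand */
    unsigned int q = num / den; /* 0x12; #DE if it exceeded 0xff */
    unsigned int r = num % den; /* 0x03 */
    assert(((r << 8) | q) == 0x0312);
}
#endif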

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
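
/* All four signed variants share the same overflow rule: IDIV raises
   #DE not only for a zero divisor but also when the quotient does not
   fit the destination register.  A minimal sketch of the 32-bit test,
   assuming the dividend is already assembled in an int64_t: */
#if 0
static int idivl_overflows(int64_t num, int32_t den)
{
    int64_t q = num / den;
    /* (int32_t)q sign-extends the low 32 bits back; the comparison
       fails exactly when q lies outside [INT32_MIN, INT32_MAX]. */
    return q != (int32_t)q;
}
#endif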

/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
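
/* Worked example (illustrative): with the default base 10, AAM splits
   AL = 0x35 (53) into AH = 5, AL = 3, and AAD reverses the operation,
   5 * 10 + 3 = 53 = 0x35 back in AL: */
#if 0
static void example_aam_aad(void)
{
    int al = 0x35, ah;
    ah = al / 10;                 /* AAM: ah = 5 */
    al = al % 10;                 /* AAM: al = 3 */
    al = ((ah * 10) + al) & 0xff; /* AAD: al = 0x35 again */
}
#endif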

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
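
/* Worked example (illustrative): after ADD AL producing 0x19 + 0x28 =
   0x41 with AF set (9 + 8 carries out of the low nibble), DAA takes
   the first branch in helper_daa and adds 6, giving AL = 0x47 -- the
   packed-BCD result of 19 + 28 = 47. */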

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
1828

    
1829
void helper_cmpxchg8b(target_ulong a0)
1830
{
1831
    uint64_t d;
1832
    int eflags;
1833

    
1834
    eflags = cc_table[CC_OP].compute_all();
1835
    d = ldq(a0);
1836
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1837
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1838
        eflags |= CC_Z;
1839
    } else {
1840
        EDX = (uint32_t)(d >> 32);
1841
        EAX = (uint32_t)d;
1842
        eflags &= ~CC_Z;
1843
    }
1844
    CC_SRC = eflags;
1845
}
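
/* Behavioural model (illustrative): CMPXCHG8B compares EDX:EAX with
   the 64-bit memory operand; on a match it stores ECX:EBX and sets ZF,
   otherwise it loads the old value into EDX:EAX and clears ZF.  The
   same logic on a plain uint64_t in place of guest memory: */
#if 0
static int cmpxchg8b_model(uint64_t *mem, uint64_t *edx_eax, uint64_t ecx_ebx)
{
    if (*mem == *edx_eax) {
        *mem = ecx_ebx;
        return 1; /* ZF set */
    }
    *edx_eax = *mem;
    return 0;     /* ZF cleared */
}
#endif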

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            EAX = 0x00000020;        /* 32 bits physical */
#else
            EAX = 0x00000024;        /* 36 bits physical */
#endif
        }
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
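
/* Illustrative only: how a guest decodes the leaf 0 registers loaded
   above -- EBX, EDX, ECX concatenate to the 12-byte vendor string
   ("GenuineIntel" is 0x756e6547/0x49656e69/0x6c65746e).  Sketch,
   assuming a little-endian host: */
#if 0
static void cpuid_vendor_string(uint32_t ebx, uint32_t edx, uint32_t ecx,
                                char buf[13])
{
    memcpy(buf + 0, &ebx, 4); /* leaf 0 register order is EBX, EDX, ECX */
    memcpy(buf + 4, &edx, 4);
    memcpy(buf + 8, &ecx, 4);
    buf[12] = '\0';
}
#endif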

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
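
/* Note (a reading of the code above, hedged): this helper covers only
   the nesting part of ENTER.  `while (--level)` runs level - 1 times,
   copying the enclosing frame pointers (the x86 "display") before t1
   is pushed as the new frame pointer; the old EBP push and the final
   ESP/EBP updates are generated inline by the translator. */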

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
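
/* Stack picture (illustrative) for the 32-bit case (shift = 1): the
   old CS selector is pushed first, then the return EIP, so on entry
   to the callee:
       [ESP + 4] = old CS
       [ESP + 0] = next_eip (return address)
   In real mode the new CS base is simply selector << 4. */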

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
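
/* Layout note (from the code above and helper_sysexit below): all four
   flat selectors are derived from MSR_IA32_SYSENTER_CS -- kernel CS at
   +0 and kernel SS at +8 on entry, user CS at +16 and user SS at +24
   (with RPL 3) on exit. */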

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        val = cpu_get_apic_tpr(env);
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
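
/* Worked example (illustrative): with CR0 = 0x11 (PE set) and an LMSW
   operand of 0x0, the expression above gives
   (0x11 & ~0xe) | (0x0 & 0xf) = 0x11: bits 1-3 come from the operand,
   while an already-set PE bit always survives the OR. */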

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
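
/* Illustrative: the 64-bit counter is split across EDX:EAX, so e.g.
   val = 0x0000000123456789ULL yields EAX = 0x23456789, EDX = 0x1. */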

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3220

    
3221
target_ulong helper_lar(target_ulong selector1)
3222
{
3223
    uint32_t e1, e2, eflags, selector;
3224
    int rpl, dpl, cpl, type;
3225

    
3226
    selector = selector1 & 0xffff;
3227
    eflags = cc_table[CC_OP].compute_all();
3228
    if ((selector & 0xfffc) == 0)
3229
        goto fail;
3230
    if (load_segment(&e1, &e2, selector) != 0)
3231
        goto fail;
3232
    rpl = selector & 3;
3233
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3234
    cpl = env->hflags & HF_CPL_MASK;
3235
    if (e2 & DESC_S_MASK) {
3236
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3237
            /* conforming */
3238
        } else {
3239
            if (dpl < cpl || dpl < rpl)
3240
                goto fail;
3241
        }
3242
    } else {
3243
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3244
        switch(type) {
3245
        case 1:
3246
        case 2:
3247
        case 3:
3248
        case 4:
3249
        case 5:
3250
        case 9:
3251
        case 11:
3252
        case 12:
3253
            break;
3254
        default:
3255
            goto fail;
3256
        }
3257
        if (dpl < cpl || dpl < rpl) {
3258
        fail:
3259
            CC_SRC = eflags & ~CC_Z;
3260
            return 0;
3261
        }
3262
    }
3263
    CC_SRC = eflags | CC_Z;
3264
    return e2 & 0x00f0ff00;
3265
}
3266

    
3267
void helper_verr(target_ulong selector1)
3268
{
3269
    uint32_t e1, e2, eflags, selector;
3270
    int rpl, dpl, cpl;
3271

    
3272
    selector = selector1 & 0xffff;
3273
    eflags = cc_table[CC_OP].compute_all();
3274
    if ((selector & 0xfffc) == 0)
3275
        goto fail;
3276
    if (load_segment(&e1, &e2, selector) != 0)
3277
        goto fail;
3278
    if (!(e2 & DESC_S_MASK))
3279
        goto fail;
3280
    rpl = selector & 3;
3281
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3282
    cpl = env->hflags & HF_CPL_MASK;
3283
    if (e2 & DESC_CS_MASK) {
3284
        if (!(e2 & DESC_R_MASK))
3285
            goto fail;
3286
        if (!(e2 & DESC_C_MASK)) {
3287
            if (dpl < cpl || dpl < rpl)
3288
                goto fail;
3289
        }
3290
    } else {
3291
        if (dpl < cpl || dpl < rpl) {
3292
        fail:
3293
            CC_SRC = eflags & ~CC_Z;
3294
            return;
3295
        }
3296
    }
3297
    CC_SRC = eflags | CC_Z;
3298
}
3299

    
3300
void helper_verw(target_ulong selector1)
3301
{
3302
    uint32_t e1, e2, eflags, selector;
3303
    int rpl, dpl, cpl;
3304

    
3305
    selector = selector1 & 0xffff;
3306
    eflags = cc_table[CC_OP].compute_all();
3307
    if ((selector & 0xfffc) == 0)
3308
        goto fail;
3309
    if (load_segment(&e1, &e2, selector) != 0)
3310
        goto fail;
3311
    if (!(e2 & DESC_S_MASK))
3312
        goto fail;
3313
    rpl = selector & 3;
3314
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3315
    cpl = env->hflags & HF_CPL_MASK;
3316
    if (e2 & DESC_CS_MASK) {
3317
        goto fail;
3318
    } else {
3319
        if (dpl < cpl || dpl < rpl)
3320
            goto fail;
3321
        if (!(e2 & DESC_W_MASK)) {
3322
        fail:
3323
            CC_SRC = eflags & ~CC_Z;
3324
            return;
3325
        }
3326
    }
3327
    CC_SRC = eflags | CC_Z;
3328
}
3329

    
3330
/* x87 FPU helpers */
3331

    
3332
static void fpu_set_exception(int mask)
3333
{
3334
    env->fpus |= mask;
3335
    if (env->fpus & (~env->fpuc & FPUC_EM))
3336
        env->fpus |= FPUS_SE | FPUS_B;
3337
}
3338

    
3339
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3340
{
3341
    if (b == 0.0)
3342
        fpu_set_exception(FPUS_ZE);
3343
    return a / b;
3344
}
3345

    
3346
void fpu_raise_exception(void)
3347
{
3348
    if (env->cr[0] & CR0_NE_MASK) {
3349
        raise_exception(EXCP10_COPR);
3350
    }
3351
#if !defined(CONFIG_USER_ONLY)
3352
    else {
3353
        cpu_set_ferr(env);
3354
    }
3355
#endif
3356
}
3357

    
3358
void helper_flds_FT0(uint32_t val)
3359
{
3360
    union {
3361
        float32 f;
3362
        uint32_t i;
3363
    } u;
3364
    u.i = val;
3365
    FT0 = float32_to_floatx(u.f, &env->fp_status);
3366
}
3367

    
3368
void helper_fldl_FT0(uint64_t val)
3369
{
3370
    union {
3371
        float64 f;
3372
        uint64_t i;
3373
    } u;
3374
    u.i = val;
3375
    FT0 = float64_to_floatx(u.f, &env->fp_status);
3376
}
3377

    
3378
void helper_fildl_FT0(int32_t val)
3379
{
3380
    FT0 = int32_to_floatx(val, &env->fp_status);
3381
}
3382

    
3383
void helper_flds_ST0(uint32_t val)
3384
{
3385
    int new_fpstt;
3386
    union {
3387
        float32 f;
3388
        uint32_t i;
3389
    } u;
3390
    new_fpstt = (env->fpstt - 1) & 7;
3391
    u.i = val;
3392
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3393
    env->fpstt = new_fpstt;
3394
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3395
}
3396

    
3397
void helper_fldl_ST0(uint64_t val)
3398
{
3399
    int new_fpstt;
3400
    union {
3401
        float64 f;
3402
        uint64_t i;
3403
    } u;
3404
    new_fpstt = (env->fpstt - 1) & 7;
3405
    u.i = val;
3406
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3407
    env->fpstt = new_fpstt;
3408
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3409
}
3410

    
3411
void helper_fildl_ST0(int32_t val)
3412
{
3413
    int new_fpstt;
3414
    new_fpstt = (env->fpstt - 1) & 7;
3415
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3416
    env->fpstt = new_fpstt;
3417
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3418
}
3419

    
3420
void helper_fildll_ST0(int64_t val)
3421
{
3422
    int new_fpstt;
3423
    new_fpstt = (env->fpstt - 1) & 7;
3424
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3425
    env->fpstt = new_fpstt;
3426
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3427
}
3428

    
3429
uint32_t helper_fsts_ST0(void)
3430
{
3431
    union {
3432
        float32 f;
3433
        uint32_t i;
3434
    } u;
3435
    u.f = floatx_to_float32(ST0, &env->fp_status);
3436
    return u.i;
3437
}
3438

    
3439
uint64_t helper_fstl_ST0(void)
3440
{
3441
    union {
3442
        float64 f;
3443
        uint64_t i;
3444
    } u;
3445
    u.f = floatx_to_float64(ST0, &env->fp_status);
3446
    return u.i;
3447
}
3448

    
3449
int32_t helper_fist_ST0(void)
3450
{
3451
    int32_t val;
3452
    val = floatx_to_int32(ST0, &env->fp_status);
3453
    if (val != (int16_t)val)
3454
        val = -32768;
3455
    return val;
3456
}
3457

    
3458
int32_t helper_fistl_ST0(void)
3459
{
3460
    int32_t val;
3461
    val = floatx_to_int32(ST0, &env->fp_status);
3462
    return val;
3463
}
3464

    
3465
int64_t helper_fistll_ST0(void)
3466
{
3467
    int64_t val;
3468
    val = floatx_to_int64(ST0, &env->fp_status);
3469
    return val;
3470
}
3471

    
3472
int32_t helper_fistt_ST0(void)
3473
{
3474
    int32_t val;
3475
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3476
    if (val != (int16_t)val)
3477
        val = -32768;
3478
    return val;
3479
}
3480

    
3481
int32_t helper_fisttl_ST0(void)
3482
{
3483
    int32_t val;
3484
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3485
    return val;
3486
}
3487

    
3488
int64_t helper_fisttll_ST0(void)
3489
{
3490
    int64_t val;
3491
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3492
    return val;
3493
}
3494

    
3495
void helper_fldt_ST0(target_ulong ptr)
3496
{
3497
    int new_fpstt;
3498
    new_fpstt = (env->fpstt - 1) & 7;
3499
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
3500
    env->fpstt = new_fpstt;
3501
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3502
}
3503

    
3504
void helper_fstt_ST0(target_ulong ptr)
3505
{
3506
    helper_fstt(ST0, ptr);
3507
}
3508

    
3509
void helper_fpush(void)
3510
{
3511
    fpush();
3512
}
3513

    
3514
void helper_fpop(void)
3515
{
3516
    fpop();
3517
}
3518

    
3519
void helper_fdecstp(void)
3520
{
3521
    env->fpstt = (env->fpstt - 1) & 7;
3522
    env->fpus &= (~0x4700);
3523
}
3524

    
3525
void helper_fincstp(void)
3526
{
3527
    env->fpstt = (env->fpstt + 1) & 7;
3528
    env->fpus &= (~0x4700);
3529
}
3530

    
3531
/* FPU move */
3532

    
3533
void helper_ffree_STN(int st_index)
3534
{
3535
    env->fptags[(env->fpstt + st_index) & 7] = 1;
3536
}
3537

    
3538
void helper_fmov_ST0_FT0(void)
3539
{
3540
    ST0 = FT0;
3541
}
3542

    
3543
void helper_fmov_FT0_STN(int st_index)
3544
{
3545
    FT0 = ST(st_index);
3546
}
3547

    
3548
void helper_fmov_ST0_STN(int st_index)
3549
{
3550
    ST0 = ST(st_index);
3551
}
3552

    
3553
void helper_fmov_STN_ST0(int st_index)
3554
{
3555
    ST(st_index) = ST0;
3556
}
3557

    
3558
void helper_fxchg_ST0_STN(int st_index)
3559
{
3560
    CPU86_LDouble tmp;
3561
    tmp = ST(st_index);
3562
    ST(st_index) = ST0;
3563
    ST0 = tmp;
3564
}
3565

    
3566
/* FPU operations */
3567

    
3568
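/* Map the floatx_compare() result (-1 = less, 0 = equal, 1 = greater,
   2 = unordered), offset by one, onto the C0 (0x0100), C2 (0x0400) and
   C3 (0x4000) condition bits. */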
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
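    /* merge the current top-of-stack index into bits 11-13 of the
       returned status word */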
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

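/* Propagate the x87 control word into the softfloat status: bits 10-11
   (RC) select the rounding mode and, when 80-bit floats are available,
   bits 8-9 (PC) the rounding precision. */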
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

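/* Packed BCD operands hold 18 decimal digits, two per byte, in the
   first 9 bytes; the sign lives in bit 7 of the tenth byte. */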
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
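    /* rebuild the 16-bit tag word: two bits per register, 00 = valid,
       01 = zero, 10 = special (NaN/infinity/denormal), 11 = empty */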
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
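    /* FXSAVE stores an abridged tag word: one bit per register with 1
       meaning valid, while env->fptags uses 1 for empty, hence the
       ^ 0xff below */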
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
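    /* the 64-bit double keeps its integer bit implicit; the 80-bit
       format stores it explicitly, hence the (1LL << 63) below */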
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
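/* 128/64 -> 64-bit unsigned division using a one-bit-at-a-time
   restoring shift-and-subtract loop; the quotient ends up in *plow and
   the remainder in *phigh. */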
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

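/* Note: the hardware rsqrtps/rcpps results are only approximations
   (good to roughly 12 bits); returning the exact value here is a
   conservative substitute. */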
static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

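/* The VMCB stores segment attributes in a packed 12-bit form; these
   helpers convert to and from the flat flags layout used by
   SegmentCache. */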
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) ||
        (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(void)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    /* XXX: invalid in 32 bit */
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags &= ~HF_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
    tlb_flush(env, 0);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
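            /* The MSR permission map holds two intercept bits per MSR
               (read and write) across three 8K-MSR ranges; t1 becomes
               the byte offset into the map and t0 the bit offset within
               that byte. */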
            switch((uint32_t)ECX) {
5100
            case 0 ... 0x1fff:
5101
                t0 = (ECX * 2) % 8;
5102
                t1 = ECX / 8;
5103
                break;
5104
            case 0xc0000000 ... 0xc0001fff:
5105
                t0 = (8192 + ECX - 0xc0000000) * 2;
5106
                t1 = (t0 / 8);
5107
                t0 %= 8;
5108
                break;
5109
            case 0xc0010000 ... 0xc0011fff:
5110
                t0 = (16384 + ECX - 0xc0010000) * 2;
5111
                t1 = (t0 / 8);
5112
                t0 %= 8;
5113
                break;
5114
            default:
5115
                helper_vmexit(type, param);
5116
                t0 = 0;
5117
                t1 = 0;
5118
                break;
5119
            }
5120
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
5121
                helper_vmexit(type, param);
5122
        }
5123
        break;
5124
    default:
5125
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5126
            helper_vmexit(type, param);
5127
        }
5128
        break;
5129
    }
5130
}
5131

    
5132
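/* I/O intercept: the I/O permission map holds one bit per port.  The
   size field of param (bits 4-6, taken from the IOIO exit information
   format) is widened into a mask so that a multi-byte access exits if
   the bit of any covered port is set. */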
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

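/* #VMEXIT: store the guest state back into the VMCB at vm_vmcb, reload
   the host state saved by vmrun from vm_hsave, record exit_code and
   exit_info_1 for the hypervisor, then restart the cpu loop. */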
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
#ifdef TARGET_X86_64
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    /* as on real hardware, an MMX op resets the FP stack top and marks
       all eight tag bytes valid (0) */
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

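/* ops_sse.h is instantiated twice: SHIFT 0 builds the 64-bit MMX
   variants of the vector helpers, SHIFT 1 the 128-bit SSE variants.
   helper_template.h is likewise expanded once per operand width
   (byte, word, long, plus quad on x86_64). */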
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
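/* note: the loops in helper_bsf/helper_bsr terminate only because the
   translator calls them with a nonzero source operand; for a zero
   source, BSF/BSR leave the destination undefined and the generated
   code skips the helper call */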
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

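/* Lazy condition codes: rather than computing EFLAGS after every
   instruction, the operands and result are kept in CC_SRC/CC_DST and
   the flags are derived on demand by one of the compute_* functions.
   CC_OP_EFLAGS means CC_SRC already holds the flag bits themselves. */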
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

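/* dispatch table indexed by the current CC_OP: the first entry of each
   pair computes the full flag set, the second only the carry flag */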
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};