/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
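
/* PF computed lazily from the low 8 bits of a result:
   parity_table[b] is CC_P iff b contains an even number of set bits. */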
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
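
/* Constants loaded by the x87 FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2,
   FLDL2E and FLDL2T instructions, in that order. */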
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
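
/* Descriptors are handled as two 32-bit words: e1 is the low word
   (limit 15..0, base 15..0), e2 the high word (base 31..24, flags,
   limit 19..16, type/DPL/P, base 23..16). */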
/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
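
/* Fetch the privilege level 'dpl' stack pointer (SS:ESP) from the
   current TSS. A 16-bit TSS (shift == 0) holds 2-byte fields, a
   32-bit TSS (shift == 1) 4-byte fields. */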
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
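
/* Hardware task switch: save the current state into the old TSS,
   then load registers, segments and the LDT from the new one.
   'source' says whether we came here via jmp, call or iret, which
   determines how the busy bit of the TSS descriptor and the NT flag
   are updated. */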
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just the selectors, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
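
/* The I/O permission bitmap starts at the 16-bit offset stored at
   TSS offset 0x66; 'size' consecutive bits starting at bit 'addr'
   must all be clear, otherwise #GP(0) is raised. */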
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
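
/* The B bit of the SS descriptor selects a 32-bit or a 16-bit stack
   pointer; SET_ESP only updates the bits of ESP selected by the
   mask. */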
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
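
/* Note: the PUSHW/PUSHL/POPW/POPL macros below work on a local copy
   of the stack pointer, which is only committed to ESP with SET_ESP
   once all accesses have succeeded. */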
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
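
/* Protected mode interrupt/exception delivery: read the IDT entry,
   hand task gates to switch_tss(), check the gate and target code
   segment privileges, possibly switch to an inner-level stack taken
   from the TSS, then push the return frame and error code. */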
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
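
/* In long mode IDT entries are 16 bytes and hold a 64-bit target
   offset plus a 3-bit IST index; a non-zero IST forces a stack
   switch regardless of the privilege change. */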
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
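
/* SYSCALL: env->star, env->lstar, env->cstar and env->fmask mirror
   the STAR, LSTAR, CSTAR and SFMASK MSRs. In long mode, CS/SS come
   from STAR[47:32], RFLAGS is masked with SFMASK and RIP is loaded
   from LSTAR (or CSTAR from compatibility mode); legacy mode jumps
   to the low 32 bits of STAR. */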
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
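
/* SYSRET: return to CPL 3 with CS/SS taken from STAR[63:48]; in
   long mode RFLAGS is restored from R11 and RIP from RCX (dflag == 2
   selects a 64-bit target). */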
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
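
/* Real mode interrupt: the IVT holds 4-byte offset:segment entries;
   push FLAGS, CS and IP, then clear IF, TF, AC and RF. */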
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
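
/* Entering SMM saves the CPU state into the SMRAM state save area
   (the 0x7exx/0x7fxx offsets relative to sm_state below) and resumes
   execution at smbase + 0x8000 in a flat, real-mode-like
   environment. */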
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
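
/* RSM: restore the state saved by do_smm_enter from SMRAM. If the
   revision ID advertises SMBASE relocation (bit 17), a new SMBASE
   is also reloaded from the save area. */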
1460

    
1461
void helper_rsm(void)
1462
{
1463
    target_ulong sm_state;
1464
    int i, offset;
1465
    uint32_t val;
1466

    
1467
    sm_state = env->smbase + 0x8000;
1468
#ifdef TARGET_X86_64
1469
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1470

    
1471
    for(i = 0; i < 6; i++) {
1472
        offset = 0x7e00 + i * 16;
1473
        cpu_x86_load_seg_cache(env, i,
1474
                               lduw_phys(sm_state + offset),
1475
                               ldq_phys(sm_state + offset + 8),
1476
                               ldl_phys(sm_state + offset + 4),
1477
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1478
    }
1479

    
1480
    env->gdt.base = ldq_phys(sm_state + 0x7e68);
1481
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1482

    
1483
    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1484
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
1485
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1486
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1487

    
1488
    env->idt.base = ldq_phys(sm_state + 0x7e88);
1489
    env->idt.limit = ldl_phys(sm_state + 0x7e84);
1490

    
1491
    env->tr.selector = lduw_phys(sm_state + 0x7e90);
1492
    env->tr.base = ldq_phys(sm_state + 0x7e98);
1493
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
1494
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1495

    
1496
    EAX = ldq_phys(sm_state + 0x7ff8);
1497
    ECX = ldq_phys(sm_state + 0x7ff0);
1498
    EDX = ldq_phys(sm_state + 0x7fe8);
1499
    EBX = ldq_phys(sm_state + 0x7fe0);
1500
    ESP = ldq_phys(sm_state + 0x7fd8);
1501
    EBP = ldq_phys(sm_state + 0x7fd0);
1502
    ESI = ldq_phys(sm_state + 0x7fc8);
1503
    EDI = ldq_phys(sm_state + 0x7fc0);
1504
    for(i = 8; i < 16; i++)
1505
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1506
    env->eip = ldq_phys(sm_state + 0x7f78);
1507
    load_eflags(ldl_phys(sm_state + 0x7f70),
1508
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1509
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
1510
    env->dr[7] = ldl_phys(sm_state + 0x7f60);
1511

    
1512
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1513
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1514
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1515

    
1516
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1517
    if (val & 0x20000) {
1518
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1519
    }
1520
#else
1521
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1522
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1523
    load_eflags(ldl_phys(sm_state + 0x7ff4),
1524
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1525
    env->eip = ldl_phys(sm_state + 0x7ff0);
1526
    EDI = ldl_phys(sm_state + 0x7fec);
1527
    ESI = ldl_phys(sm_state + 0x7fe8);
1528
    EBP = ldl_phys(sm_state + 0x7fe4);
1529
    ESP = ldl_phys(sm_state + 0x7fe0);
1530
    EBX = ldl_phys(sm_state + 0x7fdc);
1531
    EDX = ldl_phys(sm_state + 0x7fd8);
1532
    ECX = ldl_phys(sm_state + 0x7fd4);
1533
    EAX = ldl_phys(sm_state + 0x7fd0);
1534
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1535
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1536

    
1537
    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1538
    env->tr.base = ldl_phys(sm_state + 0x7f64);
1539
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
1540
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1541

    
1542
    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1543
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
1544
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1545
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1546

    
1547
    env->gdt.base = ldl_phys(sm_state + 0x7f74);
1548
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1549

    
1550
    env->idt.base = ldl_phys(sm_state + 0x7f58);
1551
    env->idt.limit = ldl_phys(sm_state + 0x7f54);
1552

    
1553
    for(i = 0; i < 6; i++) {
1554
        if (i < 3)
1555
            offset = 0x7f84 + i * 12;
1556
        else
1557
            offset = 0x7f2c + (i - 3) * 12;
1558
        cpu_x86_load_seg_cache(env, i,
1559
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1560
                               ldl_phys(sm_state + offset + 8),
1561
                               ldl_phys(sm_state + offset + 4),
1562
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1563
    }
1564
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1565

    
1566
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1567
    if (val & 0x20000) {
1568
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1569
    }
1570
#endif
1571
    CC_OP = CC_OP_EFLAGS;
1572
    env->hflags &= ~HF_SMM_MASK;
1573
    cpu_smm_update(env);
1574

    
1575
    if (loglevel & CPU_LOG_INT) {
1576
        fprintf(logfile, "SMM: after RSM\n");
1577
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1578
    }
1579
}
1580

    
1581
#endif /* !CONFIG_USER_ONLY */
1582

    
1583

    
1584
/* division, flags are undefined */
1585

    
1586
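/* NOTE: on real hardware both a zero divisor and a quotient that
   overflows the destination register raise #DE; the helpers below
   use EXCP00_DIVZ for both cases. No flags are computed since the
   architecture leaves them undefined after DIV/IDIV. */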
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
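/* NOTE: the immediate operand of AAM/AAD is the radix, 10 in the
   canonical encoding. The XXX above refers to the missing #DE that
   AAM should raise for a zero base; as written, helper_aam() would
   divide by zero in that case. */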
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

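/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match store ECX:EBX and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. The store on the mismatch path mirrors the
   unconditional locked write cycle performed by real hardware. */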
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

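/* ENTER nesting support: for level > 0 these helpers copy level-1
   saved frame pointers from the old frame onto the new one and then
   push the new frame pointer value t1; the remaining stack pointer
   and frame pointer updates are done by the translated ENTER
   sequence itself, which is why ESP is never written back here. */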
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

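/* NOTE: in long mode the LDT and TSS descriptors grow to 16 bytes,
   so entry_limit is 15 instead of 7 and bits 63..32 of the base are
   taken from the third descriptor word. */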
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
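/* A call through a call gate to a more privileged non-conforming
   code segment switches to the inner stack fetched from the TSS and
   copies param_count entries (words or dwords depending on the gate
   type) from the caller's stack before pushing the return CS:(E)IP. */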
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
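/* The stack image popped here is (E)IP, CS and, for iret, EFLAGS;
   when returning to an outer privilege level, (E)SP and SS follow.
   'shift' selects the 16/32/64-bit operand size and 'addend'
   releases the immediate operand of RET imm16. */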
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

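/* SYSENTER/SYSEXIT do not read descriptors from the GDT: CS and SS
   are derived from the SYSENTER_CS MSR plus fixed offsets and are
   given flat 4 GB attributes, hence the hand-built segment caches
   below. */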
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

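/* LSL/LAR (and VERR/VERW below) never fault on an inaccessible
   selector: success is reported through ZF, so the fail paths
   clear CC_Z in the live flags instead of raising #GP. */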
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

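/* Loads into FT0 fill the scratch second operand; loads into ST0 push
   onto the register stack: fpstt is decremented modulo 8 and the new
   top-of-stack tag is marked valid.  All conversions go through
   env->fp_status so that softfloat rounding and exception state
   applies. */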
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768; /* out of range: x87 16-bit integer indefinite */
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768; /* out of range: x87 16-bit integer indefinite */
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

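/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2
   (unordered); indexing with ret + 1 maps that onto the x87 condition
   codes C0/C3 (0x4500 = C3|C2|C0 for unordered).  fcomi_ccval below
   performs the same mapping onto the EFLAGS bits CF/ZF/PF. */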
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

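/* FPU control word fields: bits 10-11 select the rounding mode and
   bits 8-9 the precision (0 = 32 bit, 2 = 64 bit, 3 = 80 bit); both
   are propagated into the softfloat status below. */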
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

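/* FBLD/FBST use the 80-bit packed BCD format: bytes 0-8 hold 18
   decimal digits, two per byte with the low-order digit in the low
   nibble, and bit 7 of byte 9 holds the sign. */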
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

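/* Note that FPREM/FPREM1 above only perform a partial reduction when
   the exponent difference is too large (>= 53 in this double-based
   emulation); C2 is then left set and the guest is expected to loop,
   e.g.:
       1: fprem
          fnstsw %ax
          testw  $0x0400, %ax     (C2 still set?)
          jnz    1b
 */
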
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

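/* FSTENV stores the environment as 7 32-bit words in 32-bit mode
   (28 bytes) or 7 16-bit words in 16-bit mode (14 bytes).  The tag
   word uses 2 bits per register: 0 = valid, 1 = zero, 2 = special
   (NaN/infinity/denormal), 3 = empty. */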
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* the register area follows the 14-byte (16-bit) or 28-byte
       (32-bit) environment image */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

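/* FXSAVE/FXRSTOR use the 512-byte layout: FCW/FSW at offset 0, an
   abridged 8-bit tag word at offset 4 with one bit per register
   (1 = valid, hence the ^ 0xff against fptags where 1 = empty), the
   x87 registers in 16-byte slots from offset 0x20 and the XMM
   registers from offset 0xa0. */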
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

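/* 128-bit arithmetic helpers for the 64-bit MUL/IMUL/DIV/IDIV
   instructions.  div64() divides the 128-bit value *phigh:*plow by b
   with a bit-by-bit restoring shift-and-subtract loop, leaving the
   quotient in *plow and the remainder in *phigh, and returns nonzero
   if the quotient cannot fit in 64 bits. */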
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

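/* Instantiate the softmmu load/store templates once per access size
   (SHIFT 0..3 selects 1, 2, 4 and 8 byte accesses), generating the
   slow-path helpers used when a TLB lookup misses. */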
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

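/* The VMCB stores segment attributes in a packed 12-bit form
   (descriptor bits 40-47 and 52-55); svm_save_seg/svm_load_seg
   convert between that and the unpacked flags word kept in
   SegmentCache. */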
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                cpu_loop_exit();
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

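/* The MSR permission map uses 2 bits per MSR (read and write) and
   covers three 2K regions of MSR numbers: 0-0x1fff,
   0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff; t1/t0 below are
   the byte offset and bit position of the intercept bits for ECX. */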
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* each MSR is covered by two consecutive bits in the
               permission bitmap: t1 is the byte offset, t0 the bit
               offset within that byte */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

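/* IN/OUT intercept: one bit per port in the I/O permission bitmap;
   the access size encoded in bits 4..6 of param selects how many
   consecutive port bits are tested. */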
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

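/* Exit from the guest: save the guest state into the VMCB at vm_vmcb,
   reload the host state saved at vm_hsave and hand exit_code /
   exit_info_1 to the host. */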
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
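/* An fptag value of 0 marks an FP register as valid, 1 as empty: MMX
   instructions tag the whole register file valid and reset the FP
   top-of-stack, while EMMS tags every register empty again. */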
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

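/* ops_sse.h is a template: including it with SHIFT 0 instantiates the
   64 bit MMX variants of the vector helpers, SHIFT 1 the 128 bit SSE
   variants. */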
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

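/* helper_template.h likewise instantiates the shift and flag helpers
   once per operand width: SHIFT 0/1/2/3 selects 8/16/32/64 bit. */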
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
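/* Note: neither loop below terminates for t0 == 0; the translator is
   expected to call these helpers with a non-zero operand and to handle
   the zero case (ZF set, undefined result) on its own. */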
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

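/* Lazy condition codes: CC_OP records which operation last set the
   flags and CC_SRC/CC_DST hold its operands, so EFLAGS is only
   materialized here when something actually reads it. */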
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

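/* Carry-only variant, cheaper when just CF is needed (adc, sbb, jc,
   ...). INC/DEC leave CF unchanged, so all widths share compute_c_incl(),
   which returns the carry saved in CC_SRC; mul and sar likewise share
   one helper per family. */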
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}