/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
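
/* table giving the PF (parity) flag value for each possible byte value
   of a result: CC_P is set when the byte contains an even number of
   set bits */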
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
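
/* RCL rotates through the carry flag, so a 16 bit rotate count is
   reduced modulo 17 and an 8 bit count modulo 9; these tables hold the
   reduced counts. */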
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
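
/* x87 constants: 0.0, 1.0, pi, log10(2), ln(2), log2(e) and log2(10),
   i.e. the values loaded by the FLDx constant instructions */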
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
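
/* e1/e2 are the low and high 32 bit words of a segment descriptor:
   the limit is split between e1[15:0] and e2[19:16] and the base
   between e1[31:16], e2[7:0] and e2[31:24], as decoded below. */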
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
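
/* Offsets used below: in a 32 bit TSS, CR3 is at 0x1c, EIP at 0x20,
   EFLAGS at 0x24, the general registers at 0x28, the segment selectors
   at 0x48, the LDT selector at 0x60 and the T (debug trap) word at
   0x64; the 16 bit TSS packs the same data at 0x0e/0x10/0x12/0x22/0x2a. */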

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */
    if (source == SWITCH_TSS_CALL) {
403
        stw_kernel(tss_base, env->tr.selector);
404
        new_eflags |= NT_MASK;
405
    }
406

    
407
    /* set busy bit */
408
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409
        target_ulong ptr;
410
        uint32_t e2;
411
        ptr = env->gdt.base + (tss_selector & ~7);
412
        e2 = ldl_kernel(ptr + 4);
413
        e2 |= DESC_TSS_BUSY_MASK;
414
        stl_kernel(ptr + 4, e2);
415
    }
416

    
417
    /* set the new CPU state */
418
    /* from this point, any exception which occurs can give problems */
419
    env->cr[0] |= CR0_TS_MASK;
420
    env->hflags |= HF_TS_MASK;
421
    env->tr.selector = tss_selector;
422
    env->tr.base = tss_base;
423
    env->tr.limit = tss_limit;
424
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425

    
426
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427
        cpu_x86_update_cr3(env, new_cr3);
428
    }
429

    
430
    /* load all registers without an exception, then reload them with
431
       possible exception */
432
    env->eip = new_eip;
433
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435
    if (!(type & 8))
436
        eflags_mask &= 0xffff;
437
    load_eflags(new_eflags, eflags_mask);
438
    /* XXX: what to do in 16 bit case ? */
439
    EAX = new_regs[0];
440
    ECX = new_regs[1];
441
    EDX = new_regs[2];
442
    EBX = new_regs[3];
443
    ESP = new_regs[4];
444
    EBP = new_regs[5];
445
    ESI = new_regs[6];
446
    EDI = new_regs[7];
447
    if (new_eflags & VM_MASK) {
448
        for(i = 0; i < 6; i++)
449
            load_seg_vm(i, new_segs[i]);
450
        /* in vm86, CPL is always 3 */
451
        cpu_x86_set_cpl(env, 3);
452
    } else {
453
        /* CPL is set the RPL of CS */
454
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
455
        /* first just selectors as the rest may trigger exceptions */
456
        for(i = 0; i < 6; i++)
457
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
458
    }
459

    

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
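
/* Note: in 64 bit mode sp_mask may be a 64 bit value, so the 16 bit
   and 32 bit stacks must be special cased above; the 32 bit case
   relies on a 32 bit write zero extending into the full register. */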

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
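
/* Note: in the 64 bit TSS, RSP0..RSP2 are stored at offset 4 and
   IST1..IST7 at offset 36, so callers reach IST n by passing
   level = n + 3. */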

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
923
    cpl = env->hflags & HF_CPL_MASK;
924
    /* check privledge if software int */
925
    if (is_int && dpl < cpl)
926
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
927
    /* check valid bit */
928
    if (!(e2 & DESC_P_MASK))
929
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
930
    selector = e1 >> 16;
931
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
932
    ist = e2 & 7;
933
    if ((selector & 0xfffc) == 0)
934
        raise_exception_err(EXCP0D_GPF, 0);
935

    
936
    if (load_segment(&e1, &e2, selector) != 0)
937
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
939
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
941
    if (dpl > cpl)
942
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943
    if (!(e2 & DESC_P_MASK))
944
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
945
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
946
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
947
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
948
        /* to inner privilege */
949
        if (ist != 0)
950
            esp = get_rsp_from_tss(ist + 3);
951
        else
952
            esp = get_rsp_from_tss(dpl);
953
        esp &= ~0xfLL; /* align stack */
954
        ss = 0;
955
        new_stack = 1;
956
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
957
        /* to same privilege */
958
        if (env->eflags & VM_MASK)
959
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960
        new_stack = 0;
961
        if (ist != 0)
962
            esp = get_rsp_from_tss(ist + 3);
963
        else
964
            esp = ESP;
965
        esp &= ~0xfLL; /* align stack */
966
        dpl = cpl;
967
    } else {
968
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
969
        new_stack = 0; /* avoid warning */
970
        esp = 0; /* avoid warning */
971
    }
972

    
973

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
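
/* Note: SYSCALL takes the target CS selector from STAR[47:32] (SS is
   that selector + 8) and jumps to LSTAR in 64 bit mode or CSTAR in
   compatibility mode; legacy mode uses STAR[31:0] as the new EIP.
   SYSRET takes its CS/SS base selector from STAR[63:48]. */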

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
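
/* Note: vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) form the
   contributory class: two contributory faults, or a page fault
   followed by a contributory fault or another page fault, escalate to
   a double fault; a fault while delivering a double fault is a triple
   fault (handled above by cpu_abort). */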

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
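
/* Note: on SMM entry the CPU state is saved in the SMRAM state save
   area at SMBASE + 0x8000; the offsets below follow the AMD64 save map
   when TARGET_X86_64 is defined and the legacy 32 bit map otherwise. */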

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
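
/* The helpers below implement the unsigned and signed forms for each
   operand size; e.g. for DIV r/m8, AX = 0x0064 (100) divided by 7
   leaves the quotient 14 in AL and the remainder 2 in AH.  A zero
   divisor, or a quotient too wide for the destination, raises #DE. */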

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

void helper_aam(int base)
{
    int al, ah;

    /* the immediate byte is a divisor: AAM with base 0 raises #DE */
    if (base == 0)
        raise_exception(EXCP00_DIVZ);
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}
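
/* Example: with the default base 10, AAM turns AL = 0x17 (23) into
   AH = 2, AL = 3 (unpacked BCD); AAD below is the inverse, folding
   AH and AL back into AL = AH * base + AL. */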

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
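
/* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand; on a
   match it stores ECX:EBX there and sets ZF, otherwise it loads the
   operand into EDX:EAX and clears ZF.  Only ZF is affected, so the
   other flags are carried over from compute_all(). */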

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
#  if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
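
/* ENTER with a non-zero nesting level: before pushing the new frame
   pointer (t1), copy level-1 saved frame pointers down from the old
   frame so nested procedures can still reach the display of their
   enclosing frames. */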

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
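
/* LLDT and LTR load a system segment from the GDT: the selector must
   not reference the LDT (bit 2 clear), the descriptor type must match
   (2 = LDT, 1/9 = available TSS), and the descriptor must be present.
   In long mode system descriptors are 16 bytes wide, hence
   entry_limit = 15 instead of 7. */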

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
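
/* A far JMP either targets a code segment directly (a conforming
   segment only needs DPL <= CPL), or goes through a system
   descriptor: a TSS or task gate triggers a task switch, while a call
   gate supplies a new CS:EIP that is checked and jumped to without a
   stack switch. */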

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
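
/* A far CALL through a call gate to more privileged code switches to
   the inner stack given by the TSS for the target DPL, then copies
   param_count parameters from the caller's stack before pushing the
   return CS:EIP, so the callee finds them on its own stack. */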

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
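
/* IRET with EFLAGS.NT set returns from a nested task: the backlink
   field at offset 0 of the current TSS names the task to resume. */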

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
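
/* SYSENTER/SYSEXIT derive flat segments from MSR_IA32_SYSENTER_CS:
   entry runs at CPL 0 with CS = cs and SS = cs + 8; SYSEXIT returns
   to CPL 3 with CS = cs + 16 and SS = cs + 24, taking the new
   ESP/EIP from ECX/EDX. */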

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}

void helper_lmsw(target_ulong t0)
{
    /* only the 4 lower bits of CR0 are modified; LMSW cannot clear
       PE once it has been set */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
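
/* WRMSR/RDMSR transfer the 64-bit MSR value through EDX:EAX, with the
   MSR index in ECX.  Only the MSRs this emulation knows about are
   handled; unknown indices are silently ignored (see the XXX notes). */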

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
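
/* LSL, LAR, VERR and VERW probe a descriptor without loading it:
   instead of faulting on an inaccessible selector they report the
   outcome in ZF, which is why the fail paths clear CC_Z and the
   success paths set it. */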

target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3219

    
3220
target_ulong helper_lar(target_ulong selector1)
3221
{
3222
    uint32_t e1, e2, eflags, selector;
3223
    int rpl, dpl, cpl, type;
3224

    
3225
    selector = selector1 & 0xffff;
3226
    eflags = cc_table[CC_OP].compute_all();
3227
    if ((selector & 0xfffc) == 0)
3228
        goto fail;
3229
    if (load_segment(&e1, &e2, selector) != 0)
3230
        goto fail;
3231
    rpl = selector & 3;
3232
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3233
    cpl = env->hflags & HF_CPL_MASK;
3234
    if (e2 & DESC_S_MASK) {
3235
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3236
            /* conforming */
3237
        } else {
3238
            if (dpl < cpl || dpl < rpl)
3239
                goto fail;
3240
        }
3241
    } else {
3242
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3243
        switch(type) {
3244
        case 1:
3245
        case 2:
3246
        case 3:
3247
        case 4:
3248
        case 5:
3249
        case 9:
3250
        case 11:
3251
        case 12:
3252
            break;
3253
        default:
3254
            goto fail;
3255
        }
3256
        if (dpl < cpl || dpl < rpl) {
3257
        fail:
3258
            CC_SRC = eflags & ~CC_Z;
3259
            return 0;
3260
        }
3261
    }
3262
    CC_SRC = eflags | CC_Z;
3263
    return e2 & 0x00f0ff00;
3264
}
3265

    
3266
void helper_verr(target_ulong selector1)
3267
{
3268
    uint32_t e1, e2, eflags, selector;
3269
    int rpl, dpl, cpl;
3270

    
3271
    selector = selector1 & 0xffff;
3272
    eflags = cc_table[CC_OP].compute_all();
3273
    if ((selector & 0xfffc) == 0)
3274
        goto fail;
3275
    if (load_segment(&e1, &e2, selector) != 0)
3276
        goto fail;
3277
    if (!(e2 & DESC_S_MASK))
3278
        goto fail;
3279
    rpl = selector & 3;
3280
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281
    cpl = env->hflags & HF_CPL_MASK;
3282
    if (e2 & DESC_CS_MASK) {
3283
        if (!(e2 & DESC_R_MASK))
3284
            goto fail;
3285
        if (!(e2 & DESC_C_MASK)) {
3286
            if (dpl < cpl || dpl < rpl)
3287
                goto fail;
3288
        }
3289
    } else {
3290
        if (dpl < cpl || dpl < rpl) {
3291
        fail:
3292
            CC_SRC = eflags & ~CC_Z;
3293
            return;
3294
        }
3295
    }
3296
    CC_SRC = eflags | CC_Z;
3297
}
3298

    
3299
void helper_verw(target_ulong selector1)
3300
{
3301
    uint32_t e1, e2, eflags, selector;
3302
    int rpl, dpl, cpl;
3303

    
3304
    selector = selector1 & 0xffff;
3305
    eflags = cc_table[CC_OP].compute_all();
3306
    if ((selector & 0xfffc) == 0)
3307
        goto fail;
3308
    if (load_segment(&e1, &e2, selector) != 0)
3309
        goto fail;
3310
    if (!(e2 & DESC_S_MASK))
3311
        goto fail;
3312
    rpl = selector & 3;
3313
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3314
    cpl = env->hflags & HF_CPL_MASK;
3315
    if (e2 & DESC_CS_MASK) {
3316
        goto fail;
3317
    } else {
3318
        if (dpl < cpl || dpl < rpl)
3319
            goto fail;
3320
        if (!(e2 & DESC_W_MASK)) {
3321
        fail:
3322
            CC_SRC = eflags & ~CC_Z;
3323
            return;
3324
        }
3325
    }
3326
    CC_SRC = eflags | CC_Z;
3327
}
3328

    
3329
/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

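/* Propagate the x87 control word into the softfloat status: the RC
   field (bits 10-11 of fpuc) selects the rounding mode and, when
   FLOATX80 is available, the PC field (bits 8-9) selects the 32, 64
   or 80 bit rounding precision. */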
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

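/* Packed BCD (ten byte) format: bytes 0-8 hold 18 BCD digits, two
   per byte, least significant byte first; bit 7 of byte 9 is the
   sign bit. */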
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

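/* FXTRACT splits ST0 into exponent and significand: ST0 receives the
   unbiased exponent, and the significand (with its exponent forced
   back to the bias) is pushed on top of it. */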
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

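/* FPREM1/FPREM compute a partial remainder: when the exponent
   difference fits in the double mantissa (expdif < 53) the low three
   quotient bits are reported in C0, C3 and C1; otherwise only a
   partial reduction is done and C2 is set so the caller retries. */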
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

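/* FXSAVE image layout as used here: FCW/FSW at offset 0, the abridged
   tag word at offset 4 (1 = valid, hence the ^ 0xff on env->fptags),
   FP/MMX registers every 16 bytes from offset 0x20, and MXCSR plus
   the XMM registers (from offset 0xa0) only when CR4.OSFXSR is set. */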
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

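/* Conversion between the 80-bit x87 storage format and the internal
   representation: without USE_X86LDOUBLE values are kept as IEEE
   doubles, so the mantissa is shifted to add the explicit integer
   bit and the exponent is rebiased (EXPBIAS <-> 16383). */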
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

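/* 128/64 -> 64 bit unsigned division by restoring shift-and-subtract:
   the remainder accumulates in a1 while the quotient is shifted in
   through a0, one bit per iteration. */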
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

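/* The VMCB packs segment attributes into 12 bits; these helpers
   convert between that layout and the expanded hidden descriptor
   flags used internally (vmcb2cpu_attrib also folds in the base and
   limit bits that share the descriptor words). */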
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

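/* VMRUN: EAX holds the physical address of the guest VMCB.  The
   current (host) state is saved to the VM_HSAVE_PA area first, then
   the guest state and the intercept bitmaps are loaded from the
   VMCB. */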
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmmcall!\n");
}

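/* VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT do not switch,
   i.e. FS, GS, TR and LDTR plus the syscall/sysenter MSRs, between
   the CPU and the VMCB addressed by EAX. */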
void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

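/* Check one intercept: the SVM_EXIT_* code selects which cached
   intercept bitmap to test.  MSR accesses additionally consult the
   MSR permission map, two bits per MSR (read/write, selected by
   param) across three 2K-MSR ranges. */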
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_IOIO:
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

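/* I/O intercept: the I/O permission map holds one bit per port; the
   size field of param selects how many consecutive bits to test, and
   any set bit forces a #VMEXIT(IOIO) with the port in the upper half
   of exit_info_1. */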
void helper_svm_check_io(uint32_t port, uint32_t param, 
5087
                         uint32_t next_eip_addend)
5088
{
5089
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
5090
        /* FIXME: this should be read in at vmrun (faster this way?) */
5091
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5092
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5093
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5094
            /* next EIP */
5095
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
5096
                     env->eip + next_eip_addend);
5097
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5098
        }
5099
    }
5100
}
5101

    
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);
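
    /* Save the guest's interrupt-shadow state (set by e.g. STI or MOV SS)
       into the VMCB so that VMRUN can re-arm it when the guest resumes. */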
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
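
    /* With virtual interrupt masking enabled, write the current task
       priority (CR8) back into V_TPR so the saved guest state remains
       consistent. */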
    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set EFER after the CRs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
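
    /* Reload the host's RFLAGS, forcing the lazily evaluated condition
       codes into canonical EFLAGS form so no guest flag state leaks
       through. */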
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups: return to CPL 0 and report the exit reason in the VMCB */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
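/* Executing an MMX instruction resets the x87 stack top to 0 and marks
   all eight tag-word entries valid (0), since the MMX registers alias
   the x87 register file; EMMS marks them all empty (1) again. */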
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

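/* Instantiate the vector operation templates twice: SHIFT 0 builds the
   64-bit MMX variants, SHIFT 1 the 128-bit SSE variants. */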
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

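/* Instantiate the generic arithmetic helper templates for 8-, 16- and
   32-bit (and, on x86_64, 64-bit) operand sizes. */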
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
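/* Note: these helpers rely on being called with a non-zero operand (the
   translator is expected to test for zero first, as BSF/BSR leave the
   destination undefined in that case); with t0 == 0 the loops below
   would never terminate. */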
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

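/* Lazy condition-code evaluation: CC_OP records which operation last
   set the flags, and cc_table maps each CC_OP value to one routine that
   recomputes the whole of EFLAGS from CC_SRC/CC_DST and one that
   extracts just the carry flag.  For CC_OP_EFLAGS the flags are already
   stored in canonical form in CC_SRC. */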
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};