/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
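
/* parity_table[x] holds CC_P when the byte x contains an even number
   of set bits, so the lazy flag code can recover PF by indexing it
   with the low 8 bits of the last result; e.g. parity_table[0x03]
   is CC_P (two bits set). */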

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
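
/* RCL rotates through CF, so rotating a 16 bit value has period 17
   and rotating an 8 bit value has period 9; these tables reduce a
   5 bit count to the effective rotation amount. */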

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
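
/* these appear to be the constants loaded by the x87 constant
   instructions (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T),
   as the per-value comments above suggest */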

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
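
/* Note: the arithmetic flags are not kept up to date in env->eflags
   between instructions; they are recomputed on demand from the
   CC_OP/CC_SRC/CC_DST triple through cc_table, which is why
   helper_read_eflags merges compute_all() with DF and the other
   statically stored bits. */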

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
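
/* A descriptor is fetched as two 32 bit words: e1 holds limit bits
   15..0 and base bits 15..0, while e2 holds base bits 23..16 and
   31..24, the type and flag bits, and limit bits 19..16.  When the
   granularity bit G is set, the 20 bit limit is in units of 4K pages,
   hence the "(limit << 12) | 0xfff". */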

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
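
/* In a 32 bit TSS the ring 0..2 stack pointers are (ESP, SS) pairs
   stored from offset 4 onwards, 8 bytes per ring; a 16 bit TSS packs
   them as (SP, SS) word pairs from offset 2.  'shift' (type bit 3)
   records which flavour the current TSS is, hence the doubled index
   above. */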

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
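
/* Hardware task switch: the current CPU state is saved into the old
   TSS, then registers, segments, LDT and (for 32 bit tasks) CR3 are
   reloaded from the TSS named by tss_selector.  'source' records
   whether we get here via JMP, CALL or IRET, which determines how the
   busy bits of the TSS descriptors and the NT flag are updated. */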

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
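
/* The I/O permission bitmap starts at the 16 bit offset stored at TSS
   offset 0x66 and holds one bit per port (set = access denied).  A
   full word is read so that an access spanning two bitmap bytes is
   checked with a single load. */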

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
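
/* The sp_mask discipline: a 16 bit stack (SS B bit clear) wraps
   within 64K, so every push/pop masks the stack pointer with sp_mask
   before adding the segment base, and SET_ESP writes back only the
   masked bits, preserving the upper half of ESP. */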

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
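
/* The frame built above follows the hardware layout: from the stack
   top, optional error code, then EIP, CS and EFLAGS, plus the old
   SS:ESP when switching to an inner stack, plus GS/FS/DS/ES when
   leaving vm86 mode.  Trap gates (odd type) differ from interrupt
   gates only in leaving IF set. */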

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
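
/* Long mode IDT entries are 16 bytes: e3 supplies bits 63..32 of the
   handler address and the low 3 bits of e2 select an Interrupt Stack
   Table slot.  In the 64 bit TSS the IST pointers sit one reserved
   slot after RSP0..RSP2, which is presumably why get_rsp_from_tss is
   called with 'ist + 3'. */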

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
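
/* SYSCALL takes its targets from MSRs: STAR bits 47..32 give the
   kernel CS (SS is CS + 8), LSTAR/CSTAR hold the 64/32 bit entry
   points, and SFMASK (env->fmask) selects which flag bits to clear.
   The return address goes to (R)CX and, in long mode, the saved
   flags to R11. */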

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
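
/* SYSRET mirrors SYSCALL: STAR bits 63..48 select the user CS (+16
   for the 64 bit variant, as the dflag == 2 case shows), SS is that
   selector + 8, and in long mode the flags are restored from R11;
   the return always lands at CPL 3. */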

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
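
/* The contributory class is #DE (0) and #TS/#NP/#SS/#GP (10..13):
   two contributory faults in a row, or a qualifying fault while
   delivering a page fault, escalate to #DF; a further fault during
   #DF delivery is a triple fault, which this implementation reports
   via cpu_abort. */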

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
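
/* Note: the parentheses around raise_exception_err protect the
   definition from the function-like debug macro of the same name at
   the top of this file. */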

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
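
/* Bit 17 of the revision ID advertises SMBASE relocation support;
   the RSM code below only reloads env->smbase when the saved
   revision word has that bit set. */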

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
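
/* On SMM entry execution resumes at SMBASE + 0x8000 in a real mode
   like environment (paging and protection off, 4G segment limits),
   with the save state map stored upwards from SMBASE + 0x8000 as
   laid out above. */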

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

    
1582
#endif /* !CONFIG_USER_ONLY */
1583

    
1584

    
1585
/* division, flags are undefined */
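
/* On x86, a zero divisor and a quotient that overflows the destination
   register raise the same divide-error fault (#DE), which is why the
   overflow checks below reuse EXCP00_DIVZ. */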

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
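/* Real hardware raises #DE when the AAM immediate (the divisor base) is
   zero; that case is not modelled here, hence the XXX above. */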
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
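
/* Worked example: with AL = 0x3f (63), AAM with the default base 10 yields
   AH = 6, AL = 3; AAD then recomputes AL = 6 * 10 + 3 = 63. */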

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}
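
/* In helper_aaa/helper_aas above, icarry marks the case where the +/-6
   adjustment of AL also carries into (or borrows from) AH, so AH is
   corrected by one extra unit, matching the 16-bit AX arithmetic of the
   real instructions. */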

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
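
/* Worked example for DAA: packed BCD 0x15 + 0x27 gives AL = 0x3c; the low
   nibble 0xc > 9 triggers the +6 adjustment, leaving AL = 0x42, the correct
   BCD encoding of 15 + 27 = 42. */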

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
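
/* Note: real hardware requires the CMPXCHG16B operand to be 16-byte aligned
   and raises #GP(0) otherwise; that alignment check is not done here. */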

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}
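
/* CPUID leaves above the supported maximum are clamped to the highest
   standard leaf, which is roughly what real CPUs return for out-of-range
   function numbers. */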

void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            EAX = 0x00000020;        /* 32 bits physical */
#else
            EAX = 0x00000024;        /* 36 bits physical */
#endif
        }
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
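
/* For ENTER with a nesting level > 1, level-1 frame pointers are copied
   from the old frame before the new frame pointer (t1) is pushed. */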

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
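
/* In long mode, system-segment descriptors (LDT and TSS) are 16 bytes wide
   so they can hold a 64-bit base; hence entry_limit is 15 instead of 7 in
   the two helpers below. */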

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
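
/* Loading a null selector into SS normally faults, but 64-bit mode allows
   it when CPL != 3; the null-selector check at the top of helper_load_seg
   implements exactly that exception to the rule. */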

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
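
/* For the far transfers below: a conforming code segment can be entered
   from any CPL >= its DPL and keeps the caller's CPL, while a
   non-conforming one requires RPL <= CPL and DPL == CPL. */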

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
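
/* In real mode a far call simply pushes CS:IP and reloads CS; the segment
   base is always selector << 4 and no descriptor checks apply. */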

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
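
/* A call gate to a more privileged level switches to the stack taken from
   the TSS and copies param_count words (or dwords) of arguments from the
   old stack to the new one, as done below. */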

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
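
/* Note for the VM86 case in the helper below: IOPL is not writable from
   VM86 mode, so it is excluded from eflags_mask there. */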

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
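
/* IRET with EFLAGS.NT set returns to the previous task through the back
   link stored at offset 0 of the current TSS instead of popping CS:EIP. */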

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
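
/* SYSENTER/SYSEXIT load CS, SS, ESP and EIP from the SYSENTER_* MSRs (or
   from ECX/EDX on exit) and force flat 4 GB segments, which is why the
   descriptor caches are written by hand below. */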

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        val = cpu_get_apic_tpr(env);
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    cpu_x86_flush_tlb(env, addr);
}
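
/* CR4.TSD makes RDTSC a privileged instruction: with the bit set, any use
   at CPL > 0 faults with #GP, as checked below. */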

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
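
/* RDMSR and WRMSR transfer the 64-bit MSR value split across EDX:EAX, with
   the MSR index taken from ECX. */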

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
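
/* LSL and LAR (and VERR/VERW further down) never fault on a bad selector:
   they report success through ZF, so these helpers stash the updated flags
   in CC_SRC instead of raising an exception. */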
3180

    
3181
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

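/* Memory operands are converted here between their IEEE memory
   formats and the internal CPU86_LDouble working format.  Loads into
   FT0 only set the scratch source operand; loads into ST0 also push a
   new register stack entry and mark its tag as valid. */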
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

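/* Integer stores: fist/fistt results that do not fit in the 16-bit
   destination are replaced by the integer indefinite value
   -32768 (0x8000). */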
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

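/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2
   (unordered); indexing with ret + 1 maps these to the condition
   codes: C0 for less, C3 for equal, none for greater and C3|C2|C0
   for unordered. */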
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

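/* Bits 10-11 of the control word select the rounding mode and bits
   8-9 the precision; both are propagated into the softfloat status
   so that subsequent operations honour them. */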
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

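/* fbld/fbst operate on the 10-byte packed BCD format: bytes 0-8 hold
   18 decimal digits, two per byte with the low nibble least
   significant, and bit 7 of byte 9 is the sign. */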
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

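/* fprem/fprem1 report through the status word: when the reduction is
   complete, C2 is cleared and the three low quotient bits land in
   (C0,C3,C1); when the exponent difference is too large the
   reduction is only partial and C2 is set so software can iterate. */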
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

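/* The environment image stores a 2-bit tag per register: 00 valid,
   01 zero, 10 special (NaN, infinity or denormal) and 11 empty; the
   loop below rebuilds the tag word from the register contents. */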
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

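/* FXSAVE/FXRSTOR use the abridged tag format: one bit per register
   (inverted on disk), with the x87 registers stored at offset 0x20
   in 16-byte slots and the XMM registers at offset 0xa0 when
   CR4.OSFXSR is set. */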
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

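/* cpu_get_fp80/cpu_set_fp80 convert between the 80-bit extended
   format used in memory images and the internal representation: a
   direct copy when the host provides a matching long double, or an
   explicit repacking of sign, exponent and mantissa into an IEEE
   double otherwise. */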
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

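/* div64 performs a restoring shift-and-subtract division of the
   128-bit value in (*phigh:*plow) by a 64-bit divisor, leaving the
   quotient in *plow and the remainder in *phigh; a quotient that
   cannot fit in 64 bits is reported as overflow. */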
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

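/* RSQRTSS/RCPSS are only estimates on real hardware; the exact host
   arithmetic used below is actually more accurate than the
   architectural minimum. */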
static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

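/* In the vmcb segment descriptor the attribute word packs the cached
   descriptor flags: flags bits 8-15 map to the low attribute byte
   and flags bits 20-23 (AVL/L/D/G) to attribute bits 8-11, which is
   what the shifting below implements. */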
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

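/* vmrun: save the host state into the hsave page, load the guest
   state and the intercept bitmaps from the vmcb, enable intercepts,
   and optionally inject the event described by control.event_inj
   before entering the guest. */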
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) ||
        (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(void)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    /* XXX: invalid in 32 bit */
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags &= ~HF_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
    tlb_flush(env, 0);
}

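/* For MSR intercepts the MSR permission map holds two bits per MSR
   (read and write) in three 2K regions covering the ranges 0-0x1fff,
   0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff; the switch below
   turns ECX into a byte offset (t1) and bit offset (t0) into that
   map. */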
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two bits per MSR: byte index = bit index / 8 */
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

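/* I/O intercept check.  The I/O permission map holds one bit per port;
   param is the SVM IOIO exit information, whose bits 4-6 encode the
   access size in bytes, so mask covers every port touched by a
   multi-byte access and the 16-bit load lets it straddle a byte
   boundary.  For example, a two-byte access to port 0x3f8 tests bits
   0-1 of the word at iopm_base_pa + 0x7f. */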
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

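/* #VMEXIT: store the guest state (segments, descriptor tables, control
   registers, rflags/rip/rsp/rax, debug registers) back into the guest
   VMCB, record the exit code and exit information, reload the host
   state that vmrun saved in vm_hsave, drop back to CPL 0 with GIF
   clear, and return to the main loop with cpu_loop_exit().  The
   comment-only steps below are actions real hardware performs that
   this emulation does not (yet) implement. */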
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    /* XXX: should also emulate the VM_CR MSR */
    env->hflags &= ~HF_SVME_MASK;
    if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
        if (env->efer & MSR_EFER_SVME)
            env->hflags |= HF_SVME_MASK;
    } else {
        env->efer &= ~MSR_EFER_SVME;
    }
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
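/* fptags[i] is non-zero when ST(i)/MMXi is tagged empty.  Executing an
   MMX instruction resets the FPU top of stack and marks all eight
   registers valid, while EMMS tags them all empty again; the 32-bit
   stores below update four one-byte tags at a time. */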
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

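/* Template instantiation: ops_sse.h is included once with SHIFT 0 for
   the 64-bit MMX operand variants and once with SHIFT 1 for the
   128-bit SSE variants; helper_template.h is instantiated with SHIFT
   0/1/2/3 to generate the 8/16/32/64-bit arithmetic helpers (the
   64-bit set only on x86_64 targets). */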
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
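/* Note that neither loop below tests for a zero operand; the
   translator is expected to check for zero and branch around the
   helper call in that case (BSF/BSR leave the destination undefined
   and set ZF), so the loops always terminate. */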
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

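/* Lazy condition code evaluation: rather than computing EFLAGS after
   every instruction, the translator records the last flag-setting
   operation in CC_OP and its operands in CC_SRC/CC_DST.  cc_table maps
   each CC_OP value to a function that materializes all the flags and a
   cheaper one that computes only the carry flag. */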
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};