/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

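/* PF is set when the low byte of a result contains an even number of
   set bits: parity_table[b] holds CC_P when popcount(b) is even,
   e.g. parity_table[0x03] = CC_P but parity_table[0x01] = 0. */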
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

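/* RCL rotates through CF, so a w-bit rotate has period w + 1: the
   effective count of a 16-bit RCL is count mod 17 and of an 8-bit
   RCL count mod 9.  These tables fold the 5-bit count operand. */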
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

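/* A segment selector is 16 bits: bits 15..3 index the descriptor
   table, bit 2 (TI) selects LDT vs GDT, bits 1..0 are the RPL.
   Descriptors are 8 bytes; e1 receives the low dword, e2 the high
   dword.  For example, selector 0x000f names LDT entry 1 at RPL 3. */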
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

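/* Hardware task switch: save the current CPU state into the old TSS,
   adjust the busy bits in the task descriptors depending on the
   source (JMP/IRET clear the old task's busy bit, JMP/CALL set it on
   the new one), then load the full register and segment state from
   the new TSS.  A 32-bit TSS (type & 8) is at least 104 bytes, a
   16-bit TSS 44 bytes. */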
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

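/* The I/O permission bitmap lives in the 32-bit TSS at the 16-bit
   offset stored at TSS+0x66.  Each bit covers one port; an access of
   'size' bytes at 'addr' is allowed only if all 'size' bits starting
   at bit (addr & 7) of the word fetched for addr are clear. */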
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

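/* Stack-pointer updates must respect the stack segment size: with a
   16-bit SS only SP (the low 16 bits of ESP) is written, with a
   32-bit SS only ESP.  On x86_64 any other sp_mask means a 64-bit
   stack, so RSP is written unmasked. */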
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

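/* Protected-mode interrupt dispatch.  The IDT entry can be a task
   gate (type 5, handled via switch_tss), a 286 interrupt/trap gate
   (types 6/7, 16-bit frame) or a 386 interrupt/trap gate (types
   14/15, 32-bit frame).  Bit 0 of the type distinguishes trap gates,
   which leave IF set, from interrupt gates, which clear it. */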
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

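/* In long mode the TSS no longer holds SS:ESP pairs; it contains a
   flat array of 64-bit stack pointers: RSP0-RSP2 starting at offset 4
   and IST1-IST7 starting at offset 0x24.  'level' indexes that array,
   so IST n is reached with level = n + 3. */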
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

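/* 64-bit IDT entries are 16 bytes (e3 holds the upper half of the
   handler address).  Only 386-style gates are legal, and a non-zero
   IST field forces a stack switch even when the privilege level does
   not change.  Unlike in legacy mode, the SS:RSP frame is always
   pushed. */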
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

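/* SYSCALL bases the new selectors on the STAR MSR: CS comes from
   STAR[47:32] and SS is that value + 8.  In long mode the target RIP
   comes from LSTAR (64-bit callers) or CSTAR (compatibility mode) and
   SFMASK (env->fmask) selects the RFLAGS bits to clear; legacy mode
   jumps to STAR[31:0].  SYSRET symmetrically uses STAR[63:48]. */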
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

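/* In real mode the IVT is a flat table of 4-byte entries at the IDT
   base: a 16-bit offset followed by a 16-bit segment.  The stack
   frame is just FLAGS, CS and IP, and no error code ever applies. */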
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

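/* Exception classes for double-fault detection: #DE (0) and
   #TS/#NP/#SS/#GP (10-13) are contributory, #PF (14) is handled
   separately, everything else is benign.  Two contributory faults,
   or a page fault followed by a contributory fault or another page
   fault, escalate to #DF; a fault during #DF is a triple fault. */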
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

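/* On SMI the CPU saves its state to the SMRAM state save area below
   SMBASE + 0x10000 and resumes execution at SMBASE + 0x8000 in a
   flat, real-mode-like environment; RSM restores the saved state.
   The save area layout differs between the legacy 32-bit and the
   AMD64 revision, hence the two offset maps below. */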
#if defined(CONFIG_USER_ONLY)
1320

    
1321
void do_smm_enter(void)
1322
{
1323
}
1324

    
1325
void helper_rsm(void)
1326
{
1327
}
1328

    
1329
#else
1330

    
1331
#ifdef TARGET_X86_64
1332
#define SMM_REVISION_ID 0x00020064
1333
#else
1334
#define SMM_REVISION_ID 0x00020000
1335
#endif
1336

    
1337
void do_smm_enter(void)
1338
{
1339
    target_ulong sm_state;
1340
    SegmentCache *dt;
1341
    int i, offset;
1342

    
1343
    if (loglevel & CPU_LOG_INT) {
1344
        fprintf(logfile, "SMM: enter\n");
1345
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1346
    }
1347

    
1348
    env->hflags |= HF_SMM_MASK;
1349
    cpu_smm_update(env);
1350

    
1351
    sm_state = env->smbase + 0x8000;
1352

    
1353
#ifdef TARGET_X86_64
1354
    for(i = 0; i < 6; i++) {
1355
        dt = &env->segs[i];
1356
        offset = 0x7e00 + i * 16;
1357
        stw_phys(sm_state + offset, dt->selector);
1358
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1359
        stl_phys(sm_state + offset + 4, dt->limit);
1360
        stq_phys(sm_state + offset + 8, dt->base);
1361
    }
1362

    
1363
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1364
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1365

    
1366
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1367
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1368
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1369
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1370

    
1371
    stq_phys(sm_state + 0x7e88, env->idt.base);
1372
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1373

    
1374
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1375
    stq_phys(sm_state + 0x7e98, env->tr.base);
1376
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1377
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1378

    
1379
    stq_phys(sm_state + 0x7ed0, env->efer);
1380

    
1381
    stq_phys(sm_state + 0x7ff8, EAX);
1382
    stq_phys(sm_state + 0x7ff0, ECX);
1383
    stq_phys(sm_state + 0x7fe8, EDX);
1384
    stq_phys(sm_state + 0x7fe0, EBX);
1385
    stq_phys(sm_state + 0x7fd8, ESP);
1386
    stq_phys(sm_state + 0x7fd0, EBP);
1387
    stq_phys(sm_state + 0x7fc8, ESI);
1388
    stq_phys(sm_state + 0x7fc0, EDI);
1389
    for(i = 8; i < 16; i++)
1390
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1391
    stq_phys(sm_state + 0x7f78, env->eip);
1392
    stl_phys(sm_state + 0x7f70, compute_eflags());
1393
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1394
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1395

    
1396
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1397
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1398
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1399

    
1400
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1401
    stl_phys(sm_state + 0x7f00, env->smbase);
1402
#else
1403
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1404
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1405
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1406
    stl_phys(sm_state + 0x7ff0, env->eip);
1407
    stl_phys(sm_state + 0x7fec, EDI);
1408
    stl_phys(sm_state + 0x7fe8, ESI);
1409
    stl_phys(sm_state + 0x7fe4, EBP);
1410
    stl_phys(sm_state + 0x7fe0, ESP);
1411
    stl_phys(sm_state + 0x7fdc, EBX);
1412
    stl_phys(sm_state + 0x7fd8, EDX);
1413
    stl_phys(sm_state + 0x7fd4, ECX);
1414
    stl_phys(sm_state + 0x7fd0, EAX);
1415
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1416
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1417

    
1418
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1419
    stl_phys(sm_state + 0x7f64, env->tr.base);
1420
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1421
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1422

    
1423
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1424
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1425
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1426
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1427

    
1428
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1429
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1430

    
1431
    stl_phys(sm_state + 0x7f58, env->idt.base);
1432
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1433

    
1434
    for(i = 0; i < 6; i++) {
1435
        dt = &env->segs[i];
1436
        if (i < 3)
1437
            offset = 0x7f84 + i * 12;
1438
        else
1439
            offset = 0x7f2c + (i - 3) * 12;
1440
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1441
        stl_phys(sm_state + offset + 8, dt->base);
1442
        stl_phys(sm_state + offset + 4, dt->limit);
1443
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1444
    }
1445
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1446

    
1447
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1448
    stl_phys(sm_state + 0x7ef8, env->smbase);
1449
#endif
1450
    /* init SMM cpu state */
1451

    
1452
#ifdef TARGET_X86_64
1453
    cpu_load_efer(env, 0);
1454
#endif
1455
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1456
    env->eip = 0x00008000;
1457
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1458
                           0xffffffff, 0);
1459
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1460
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1461
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1462
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1463
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1464

    
1465
    cpu_x86_update_cr0(env,
1466
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1467
    cpu_x86_update_cr4(env, 0);
1468
    env->dr[7] = 0x00000400;
1469
    CC_OP = CC_OP_EFLAGS;
1470
}
1471

    
1472
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

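/* The double-width dividend lives in AX, DX:AX or EDX:EAX; a divisor of
   zero, or a quotient that does not fit in the destination register,
   raises #DE. */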
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
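/* AAM and AAD take the immediate byte following the opcode as the base
   (10 in the canonical encoding).  As the XXX above notes, hardware
   raises #DE for AAM with a base of 0, which is not handled here. */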
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

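/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match
   store ECX:EBX and set ZF, otherwise load the old value into EDX:EAX
   and clear ZF.  As on real hardware, the memory operand is written
   back even when the comparison fails. */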
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

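/* ENTER with a non-zero nesting level: copy level-1 saved frame
   pointers from the enclosing frames onto the new stack, then push the
   current frame temporary t1.  All accesses are masked with the SS
   stack-size mask. */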
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

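/* LLDT: load the local descriptor table register from a GDT entry.  In
   long mode the system descriptor is 16 bytes, hence the larger
   entry_limit and the extra dword carrying base bits 63:32. */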
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

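/* LTR: load the task register.  The selector must reference an
   available TSS descriptor (type 1 or 9) in the GDT; the descriptor is
   written back with the busy bit set. */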
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
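/* Null selectors may be loaded into the data segment registers (and
   into SS in 64-bit mode when CPL != 3); any other selector goes
   through the full type, privilege and present checks of MOV Sreg
   before the descriptor cache is updated. */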
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
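/* A direct far jump loads CS from a code segment descriptor; a jump
   through a call gate or task gate takes the target from the gate
   instead, after the usual DPL/RPL privilege checks. */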
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
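/* Far calls through a call gate to a more privileged non-conforming
   code segment switch to the inner stack fetched from the TSS and copy
   param_count parameters from the caller's stack before pushing the
   return address. */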
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
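/* IRET pops IP, CS and FLAGS from the stack; in vm86 mode IOPL is not
   writable by the guest, so it is left out of the update mask below. */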
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode lret and iret */
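/* Common tail of LRET and IRET: pop CS:EIP (and EFLAGS for IRET); when
   the return goes to an outer privilege level, also pop SS:ESP, reload
   the stack segment and invalidate data segment registers that are too
   privileged for the new CPL. */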
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

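/* SYSENTER enters ring 0 with flat segments derived from the
   SYSENTER_CS MSR; SYSEXIT returns to ring 3 taking the new ESP and EIP
   from ECX and EDX.  Neither instruction saves a return address. */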
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

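/* RDTSC faults with #GP when CR4.TSD is set and CPL != 0.  The SVM
   tsc_offset is added so that guests running under a hypervisor see a
   consistent time base. */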
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    /* RDPMC is allowed at CPL > 0 only when CR4.PCE is set */
    if (((env->cr[4] & CR4_PCE_MASK) == 0) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
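/* WRMSR/RDMSR: ECX selects the MSR, EDX:EAX carries the 64-bit value.
   Writes to MSR_EFER only touch the bits enabled by the guest CPUID
   feature flags; unknown MSRs are silently ignored (see the XXX in the
   default cases). */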
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

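/* LSL/LAR (and VERR/VERW below) never fault on a bad selector: success
   or failure is reported through ZF, with LSL returning the segment
   limit and LAR the access-rights bytes of the descriptor. */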
target_ulong helper_lsl(target_ulong selector1)
3159
{
3160
    unsigned int limit;
3161
    uint32_t e1, e2, eflags, selector;
3162
    int rpl, dpl, cpl, type;
3163

    
3164
    selector = selector1 & 0xffff;
3165
    eflags = helper_cc_compute_all(CC_OP);
3166
    if (load_segment(&e1, &e2, selector) != 0)
3167
        goto fail;
3168
    rpl = selector & 3;
3169
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3170
    cpl = env->hflags & HF_CPL_MASK;
3171
    if (e2 & DESC_S_MASK) {
3172
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3173
            /* conforming */
3174
        } else {
3175
            if (dpl < cpl || dpl < rpl)
3176
                goto fail;
3177
        }
3178
    } else {
3179
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3180
        switch(type) {
3181
        case 1:
3182
        case 2:
3183
        case 3:
3184
        case 9:
3185
        case 11:
3186
            break;
3187
        default:
3188
            goto fail;
3189
        }
3190
        if (dpl < cpl || dpl < rpl) {
3191
        fail:
3192
            CC_SRC = eflags & ~CC_Z;
3193
            return 0;
3194
        }
3195
    }
3196
    limit = get_seg_limit(e1, e2);
3197
    CC_SRC = eflags | CC_Z;
3198
    return limit;
3199
}
3200

    
3201
target_ulong helper_lar(target_ulong selector1)
3202
{
3203
    uint32_t e1, e2, eflags, selector;
3204
    int rpl, dpl, cpl, type;
3205

    
3206
    selector = selector1 & 0xffff;
3207
    eflags = helper_cc_compute_all(CC_OP);
3208
    if ((selector & 0xfffc) == 0)
3209
        goto fail;
3210
    if (load_segment(&e1, &e2, selector) != 0)
3211
        goto fail;
3212
    rpl = selector & 3;
3213
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3214
    cpl = env->hflags & HF_CPL_MASK;
3215
    if (e2 & DESC_S_MASK) {
3216
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3217
            /* conforming */
3218
        } else {
3219
            if (dpl < cpl || dpl < rpl)
3220
                goto fail;
3221
        }
3222
    } else {
3223
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3224
        switch(type) {
3225
        case 1:
3226
        case 2:
3227
        case 3:
3228
        case 4:
3229
        case 5:
3230
        case 9:
3231
        case 11:
3232
        case 12:
3233
            break;
3234
        default:
3235
            goto fail;
3236
        }
3237
        if (dpl < cpl || dpl < rpl) {
3238
        fail:
3239
            CC_SRC = eflags & ~CC_Z;
3240
            return 0;
3241
        }
3242
    }
3243
    CC_SRC = eflags | CC_Z;
3244
    return e2 & 0x00f0ff00;
3245
}
3246

    
3247
void helper_verr(target_ulong selector1)
3248
{
3249
    uint32_t e1, e2, eflags, selector;
3250
    int rpl, dpl, cpl;
3251

    
3252
    selector = selector1 & 0xffff;
3253
    eflags = helper_cc_compute_all(CC_OP);
3254
    if ((selector & 0xfffc) == 0)
3255
        goto fail;
3256
    if (load_segment(&e1, &e2, selector) != 0)
3257
        goto fail;
3258
    if (!(e2 & DESC_S_MASK))
3259
        goto fail;
3260
    rpl = selector & 3;
3261
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3262
    cpl = env->hflags & HF_CPL_MASK;
3263
    if (e2 & DESC_CS_MASK) {
3264
        if (!(e2 & DESC_R_MASK))
3265
            goto fail;
3266
        if (!(e2 & DESC_C_MASK)) {
3267
            if (dpl < cpl || dpl < rpl)
3268
                goto fail;
3269
        }
3270
    } else {
3271
        if (dpl < cpl || dpl < rpl) {
3272
        fail:
3273
            CC_SRC = eflags & ~CC_Z;
3274
            return;
3275
        }
3276
    }
3277
    CC_SRC = eflags | CC_Z;
3278
}
3279

    
3280
void helper_verw(target_ulong selector1)
3281
{
3282
    uint32_t e1, e2, eflags, selector;
3283
    int rpl, dpl, cpl;
3284

    
3285
    selector = selector1 & 0xffff;
3286
    eflags = helper_cc_compute_all(CC_OP);
3287
    if ((selector & 0xfffc) == 0)
3288
        goto fail;
3289
    if (load_segment(&e1, &e2, selector) != 0)
3290
        goto fail;
3291
    if (!(e2 & DESC_S_MASK))
3292
        goto fail;
3293
    rpl = selector & 3;
3294
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3295
    cpl = env->hflags & HF_CPL_MASK;
3296
    if (e2 & DESC_CS_MASK) {
3297
        goto fail;
3298
    } else {
3299
        if (dpl < cpl || dpl < rpl)
3300
            goto fail;
3301
        if (!(e2 & DESC_W_MASK)) {
3302
        fail:
3303
            CC_SRC = eflags & ~CC_Z;
3304
            return;
3305
        }
3306
    }
3307
    CC_SRC = eflags | CC_Z;
3308
}
3309

    
3310
/* x87 FPU helpers */
3311

    
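/* Record a pending x87 exception in the status word; if it is
   unmasked in the control word, also set the summary (SE) and busy
   (B) bits so the fault is delivered by the next waiting FP
   instruction (see fpu_raise_exception() and helper_fwait()). */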
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

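/* 16-bit integer store: on overflow the hardware writes the "integer
   indefinite" value 0x8000 (-32768), which the range check below
   emulates. */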
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

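/* Propagate the x87 control word into the softfloat status: bits
   10-11 (RC) select the rounding mode and bits 8-9 (PC) select the
   rounding precision (32, 64 or 80-bit significand). */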
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

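/* FBLD/FBSTP use the 80-bit packed BCD format: bytes 0-8 hold 18
   decimal digits, two per byte with the low digit in the low nibble,
   and bit 7 of byte 9 holds the sign. */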
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

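/* FPREM computes the partial remainder with a truncating quotient
   (helper_fprem1() above rounds the quotient to nearest instead).
   When the exponent difference is too large for one step (>= 53
   here, since the emulation works in double precision), C2 is set
   and only a partial reduction is done, so the instruction must be
   retried; otherwise C2 is cleared and the low quotient bits are
   reported in C0, C3 and C1. */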
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

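/* The 16-bit tag word is rebuilt from the fptags[] array, two bits
   per physical register: 00 = valid, 01 = zero, 10 = special (NaN,
   infinity, denormal), 11 = empty. */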
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

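/* FXSAVE/FXRSTOR use the 512-byte extended save area: FPU control
   state and an abridged one-bit-per-register tag word at the start,
   MXCSR at offset 0x18, the x87 registers in 16-byte slots from
   offset 0x20 and the XMM registers from offset 0xa0. */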
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

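/* 128/64 -> 64 bit division for DIV r/m64: overflow when the high
   half of the dividend is not smaller than the divisor, otherwise a
   bit-by-bit shift-and-subtract loop yields quotient (*plow) and
   remainder (*phigh). */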
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

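/* 64-bit MUL/IMUL return the full 128-bit product in RDX:RAX. For
   IMUL, CF/OF must be set when the high half is not just the sign
   extension of the low half, which is the condition stored into
   CC_SRC below. */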
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

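/* BOUND: raise #BR (exception 5) when the signed array index lies
   outside the [lower, upper] bound pair loaded from memory. */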
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

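/* The VMCB stores segment attributes in a packed 12-bit form: bits
   8..15 and 20..23 of QEMU's segment flags (the descriptor attribute
   bits, skipping the limit 19:16 field between them) are stored
   contiguously. The helpers below convert to and from SegmentCache. */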
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

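/* VMRUN: world switch into the guest. The host context is saved to
   the hsave page, the intercept bitmaps and guest state are loaded
   from the VMCB pointed to by rAX, and any event pending in EVENTINJ
   is injected before execution resumes at the guest RIP. */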
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                cpu_loop_exit();
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

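/* MSR intercepts are decided by the MSR permission bitmap: two bits
   per MSR (read, then write) in three 2K regions covering MSRs
   0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff;
   'param' selects the read (0) or write (1) bit. */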
5075
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
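            /* The MSR permission map allocates two bits per MSR (read
               bit, then write bit) in three 2K-byte regions: MSRs
               0-0x1fff at byte offset 0, 0xc0000000-0xc0001fff at
               0x800, and 0xc0010000-0xc0011fff at 0x1000.  t1 is the
               byte index into the map, t0 the bit position of the read
               bit within that byte; 'param' (0 = read, 1 = write)
               selects between the two bits. */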
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

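/* Check the I/O permission map (one bit per port) for an intercepted
   access to 'port'.  'param' carries the IOIO exit information; bits
   4-6 encode the access size in bytes, so 'mask' covers every port
   byte the access touches.  On an intercept, exit_info_2 receives the
   address of the instruction following IN/OUT before the #VMEXIT is
   raised. */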
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

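/* #VMEXIT: store the guest state back into the current VMCB, record
   the exit reason in control.exit_code/exit_info_1, then reload the
   host context that VMRUN stashed in the vm_hsave area. */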
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

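    /* Merge the virtual interrupt state back into the VMCB: keep the
       current V_TPR, and set V_IRQ if a virtual interrupt is still
       pending delivery. */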
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* EFER must be loaded after the control registers so that the
       hidden flags get set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
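/* The MMX registers alias the x87 register file: executing an MMX
   instruction resets the FPU stack top and marks all eight tags as
   valid (0), while EMMS marks them all as empty (1). */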
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
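/* Both helpers scan until they find a set bit, so callers must pass a
   nonzero operand (the translator branches around them when the source
   is zero, matching BSF/BSR's undefined result in that case). */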
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

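/* Lazy condition codes: instructions record their operands in CC_SRC
   and CC_DST and the kind of operation in CC_OP; the flags themselves
   are only reconstructed on demand by the compute_* helpers below.
   With CC_OP_EFLAGS the flags are already stored explicitly in
   CC_SRC. */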
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

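/* Same dispatch, but computing only the carry flag; this is cheaper
   than helper_cc_compute_all when the translator just needs CF
   (e.g. for ADC, SBB or the rotate-through-carry ops). */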
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}