/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

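/* Maps each possible low byte of a result to CC_P: the x86 parity
   flag is set when the low 8 bits of the result contain an even
   number of 1 bits. */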
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

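/* FPU constants in the order used by the fldXX constant-load helpers
   (presumably defined later in this file): 0.0, 1.0, pi, log10(2),
   ln(2), log2(e), log2(10). */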
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

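/* helper_lock()/helper_unlock() bracket LOCK-prefixed read-modify-write
   sequences (presumably emitted around them by the translator): one
   global spinlock serializes all emulated CPUs, hence the "broken
   thread support" note above. */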
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

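/* The arithmetic flags are kept in lazy form (CC_OP plus CC_SRC/CC_DST)
   and DF is stored separately, so reading EFLAGS means reassembling
   those pieces with the rest of env->eflags. */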
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

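/* Descriptor-table access: e1/e2 receive the low and high 32 bits of
   the 8-byte segment descriptor. Bit 2 of the selector (TI) chooses
   the LDT over the GDT; the upper selector bits index the table. */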
/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

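/* The 20-bit limit field is split across e1 and e2; with the
   granularity bit set it counts 4K pages, so e.g. a raw limit of
   0xfffff expands to 0xffffffff (4 GiB - 1). */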
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

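/* Fetch the ring-dpl stack pointer from the current TSS: a 32-bit TSS
   (shift == 1) keeps {esp, ss} pairs at offset 4 + 8 * dpl, a 16-bit
   TSS at offset 2 + 4 * dpl. */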
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

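/* switch_tss() implements hardware task switching: the outgoing
   register state is written back to the current TSS, then registers,
   segments and the LDT are loaded from the incoming one. 'source'
   distinguishes JMP/CALL/IRET, which differ in busy-bit and NT flag
   handling. */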
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

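/* Example: a 1-byte access to port 0x3f9 reads the 16-bit bitmap word
   at io_offset + (0x3f9 >> 3) and tests bit 0x3f9 & 7; any set bit in
   the range covered by the access raises #GP(0). */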
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

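/* Note the PUSH/POP macros update a local stack-pointer copy, so a
   fault partway through a multi-push sequence leaves the architectural
   ESP untouched; callers commit the final value with SET_ESP. */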
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

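    /* Each IDT entry here is an 8-byte gate descriptor; e1/e2 are its
       two words, encoding gate type, DPL, target selector and offset. */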
    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate (as opposed to a trap gate) also clears IF */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

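/* 64-bit interrupt delivery always pushes SS:RSP and can switch to an
   Interrupt Stack Table entry (ist 1..7 taken from the gate) instead
   of the per-ring RSP from the TSS. */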
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate (as opposed to a trap gate) also clears IF */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

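/* SYSRET is the matching return path: the target CS/SS selectors come
   from bits 63:48 of MSR_STAR, and in long mode RFLAGS is restored
   from R11 (saved there by SYSCALL above). */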
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

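/* Exceptions 0 and 10..13 form the "contributory" class tested above:
   two of them in a row escalate to a double fault (#DF, vector 8), and
   a further fault while delivering #DF is a triple fault, which
   check_exception turns into cpu_abort. */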
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void noreturn raise_interrupt(int intno, int is_int, int error_code,
                                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

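/* do_smm_enter() below dumps the CPU state into the SMRAM state-save
   area at smbase + 0x8000 (the 32-bit and AMD64 save maps differ,
   hence the two revision IDs above), then enters a flat environment
   resembling real mode, with CS base = smbase and EIP = 0x8000. */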
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

1594
/* division, flags are undefined */

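/* The helpers below implement the DIV/IDIV family: the double-width
   dividend lives in AL/AX/EAX plus AH/DX/EDX, the quotient goes to the
   low half and the remainder to the high half. A zero divisor or a
   quotient that does not fit the destination raises #DE. Example: with
   AX = 0x0105 and t0 = 0x10, an 8-bit div yields AL = 0x10 (quotient
   16) and AH = 0x05 (remainder 5). */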
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM should raise #DE (divide error) when the base operand is 0 */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

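/* AAA/AAS adjust AL after an add/subtract of unpacked BCD digits.
   "icarry" is the carry (resp. borrow) out of AL when 6 is added to
   (resp. subtracted from) the full byte, so stepping AH by 1 + icarry
   makes the combined update equivalent to AX +/- 0x106. CF and AF are
   set when an adjustment happened and cleared otherwise. */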
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

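/* INTO: raise interrupt 4 (#OF) if the overflow flag is set; the saved
   return address points past the INTO instruction. */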
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

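/* ENTER with nesting level > 0: skip the slot holding the saved frame
   pointer, copy the level-1 outer frame pointers from the old frame,
   then push t1 (the new frame pointer value passed in by the generated
   code). Stack accesses are masked with the SS operand-size mask; the
   64-bit variant below uses flat addressing instead. */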
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

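/* LLDT: load the LDT register from a GDT descriptor. A null selector
   marks the LDT as invalid; in long mode the descriptor is 16 bytes,
   the third dword holding bits 63..32 of the base. */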
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

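/* LTR: load the task register from a GDT descriptor (available 286 or
   386 TSS, type 1 or 9) and mark the descriptor busy. In long mode the
   TSS descriptor is 16 bytes and the type bits of its upper half must
   be zero. */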
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
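/* Direct far calls push CS:(E)IP and jump. Through a call gate to a
   more privileged non-conforming segment, a stack switch is performed:
   SS:ESP for the target privilege level is fetched from the TSS, the
   old SS:ESP is pushed on the new stack, and param_count words (or
   dwords) of parameters are copied across before the return address is
   pushed. */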
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret and far ret (is_iret selects which) */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

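/* SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS MSR:
   SYSENTER loads CS = sysenter_cs and SS = CS + 8 with EIP/ESP taken
   from the SYSENTER_EIP/ESP MSRs; SYSEXIT returns to CS =
   sysenter_cs + 16 (or +32 in 64-bit mode) with SS = CS + 8. No return
   address is saved, and a zero sysenter_cs faults with #GP. */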
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

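/* Writes to DR0-DR3 move the corresponding hardware breakpoint; a
   write to DR7 re-evaluates all four, since DR7 holds the enable bits
   and the type/length fields. */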
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

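/* RDTSC: returns the time stamp counter (plus the SVM TSC offset) in
   EDX:EAX. When CR4.TSD is set the instruction is privileged and
   faults with #GP outside CPL 0; RDPMC is guarded by CR4.PCE in the
   same way. */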
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

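/* LSL/LAR/VERR/VERW do not fault on bad selectors: they report success
   or failure through ZF only, so the helpers below return their result
   (or nothing) and leave the updated flags in CC_SRC. */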
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
3197
{
3198
    uint32_t e1, e2, eflags, selector;
3199
    int rpl, dpl, cpl, type;
3200

    
3201
    selector = selector1 & 0xffff;
3202
    eflags = helper_cc_compute_all(CC_OP);
3203
    if ((selector & 0xfffc) == 0)
3204
        goto fail;
3205
    if (load_segment(&e1, &e2, selector) != 0)
3206
        goto fail;
3207
    rpl = selector & 3;
3208
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3209
    cpl = env->hflags & HF_CPL_MASK;
3210
    if (e2 & DESC_S_MASK) {
3211
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3212
            /* conforming */
3213
        } else {
3214
            if (dpl < cpl || dpl < rpl)
3215
                goto fail;
3216
        }
3217
    } else {
3218
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3219
        switch(type) {
3220
        case 1:
3221
        case 2:
3222
        case 3:
3223
        case 4:
3224
        case 5:
3225
        case 9:
3226
        case 11:
3227
        case 12:
3228
            break;
3229
        default:
3230
            goto fail;
3231
        }
3232
        if (dpl < cpl || dpl < rpl) {
3233
        fail:
3234
            CC_SRC = eflags & ~CC_Z;
3235
            return 0;
3236
        }
3237
    }
3238
    CC_SRC = eflags | CC_Z;
3239
    return e2 & 0x00f0ff00;
3240
}
3241

    
3242
void helper_verr(target_ulong selector1)
3243
{
3244
    uint32_t e1, e2, eflags, selector;
3245
    int rpl, dpl, cpl;
3246

    
3247
    selector = selector1 & 0xffff;
3248
    eflags = helper_cc_compute_all(CC_OP);
3249
    if ((selector & 0xfffc) == 0)
3250
        goto fail;
3251
    if (load_segment(&e1, &e2, selector) != 0)
3252
        goto fail;
3253
    if (!(e2 & DESC_S_MASK))
3254
        goto fail;
3255
    rpl = selector & 3;
3256
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3257
    cpl = env->hflags & HF_CPL_MASK;
3258
    if (e2 & DESC_CS_MASK) {
3259
        if (!(e2 & DESC_R_MASK))
3260
            goto fail;
3261
        if (!(e2 & DESC_C_MASK)) {
3262
            if (dpl < cpl || dpl < rpl)
3263
                goto fail;
3264
        }
3265
    } else {
3266
        if (dpl < cpl || dpl < rpl) {
3267
        fail:
3268
            CC_SRC = eflags & ~CC_Z;
3269
            return;
3270
        }
3271
    }
3272
    CC_SRC = eflags | CC_Z;
3273
}
3274

    
3275
void helper_verw(target_ulong selector1)
3276
{
3277
    uint32_t e1, e2, eflags, selector;
3278
    int rpl, dpl, cpl;
3279

    
3280
    selector = selector1 & 0xffff;
3281
    eflags = helper_cc_compute_all(CC_OP);
3282
    if ((selector & 0xfffc) == 0)
3283
        goto fail;
3284
    if (load_segment(&e1, &e2, selector) != 0)
3285
        goto fail;
3286
    if (!(e2 & DESC_S_MASK))
3287
        goto fail;
3288
    rpl = selector & 3;
3289
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3290
    cpl = env->hflags & HF_CPL_MASK;
3291
    if (e2 & DESC_CS_MASK) {
3292
        goto fail;
3293
    } else {
3294
        if (dpl < cpl || dpl < rpl)
3295
            goto fail;
3296
        if (!(e2 & DESC_W_MASK)) {
3297
        fail:
3298
            CC_SRC = eflags & ~CC_Z;
3299
            return;
3300
        }
3301
    }
3302
    CC_SRC = eflags | CC_Z;
3303
}
3304

    
/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

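/* load helpers: the *_FT0 variants only set the temporary FT0
   register, while the *_ST0 variants push a new value on the FPU
   register stack (fpstt is decremented and the tag word updated) */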
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

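/* fcom_ccval maps the floatx_compare() result (-1: less, 0: equal,
   1: greater, 2: unordered) to the C3/C2/C0 bits of the FPU status
   word, hence the 'ret + 1' indexing below */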
/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

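/* update_fp_status propagates the RC (rounding control) and PC
   (precision control) fields of the FPU control word to the
   softfloat status */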
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

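/* fbld and fbst use the x87 packed BCD format: 9 bytes holding 18
   decimal digits (two per byte, least significant byte first)
   followed by a sign byte (bit 7 = sign) */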
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

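/* fprem and fprem1 compute a partial remainder: when the exponent
   difference is too large the reduction is done in several steps and
   C2 is set to tell the caller to execute the instruction again */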
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

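/* fstenv/fsave encode a 2 bit tag per register: 0 = valid, 1 = zero,
   2 = special (NaN, infinity, denormal), 3 = empty */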
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

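/* fxsave/fxrstor use the 512 byte FXSAVE area: control and status
   words at the start, the eight FP registers at offset 0x20 (16
   bytes each) and the XMM registers at offset 0xa0 */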
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

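/* 128 bit arithmetic helpers for the 64 bit mul/div instructions:
   values are kept as (low, high) pairs of 64 bit words */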
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

4460
void helper_mulq_EAX_T0(target_ulong t0)
4461
{
4462
    uint64_t r0, r1;
4463

    
4464
    mulu64(&r0, &r1, EAX, t0);
4465
    EAX = r0;
4466
    EDX = r1;
4467
    CC_DST = r0;
4468
    CC_SRC = r1;
4469
}
4470

    
4471
void helper_imulq_EAX_T0(target_ulong t0)
4472
{
4473
    uint64_t r0, r1;
4474

    
4475
    muls64(&r0, &r1, EAX, t0);
4476
    EAX = r0;
4477
    EDX = r1;
4478
    CC_DST = r0;
4479
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4480
}
4481

    
4482
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4483
{
4484
    uint64_t r0, r1;
4485

    
4486
    muls64(&r0, &r1, t0, t1);
4487
    CC_DST = r0;
4488
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4489
    return r0;
4490
}
4491

    
4492
void helper_divq_EAX(target_ulong t0)
4493
{
4494
    uint64_t r0, r1;
4495
    if (t0 == 0) {
4496
        raise_exception(EXCP00_DIVZ);
4497
    }
4498
    r0 = EAX;
4499
    r1 = EDX;
4500
    if (div64(&r0, &r1, t0))
4501
        raise_exception(EXCP00_DIVZ);
4502
    EAX = r0;
4503
    EDX = r1;
4504
}
4505

    
4506
void helper_idivq_EAX(target_ulong t0)
4507
{
4508
    uint64_t r0, r1;
4509
    if (t0 == 0) {
4510
        raise_exception(EXCP00_DIVZ);
4511
    }
4512
    r0 = EAX;
4513
    r1 = EDX;
4514
    if (idiv64(&r0, &r1, t0))
4515
        raise_exception(EXCP00_DIVZ);
4516
    EAX = r0;
4517
    EDX = r1;
4518
}
4519
#endif
4520

    
4521
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

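/* generate the softmmu load/store helpers for the four access sizes
   (SHIFT is the log2 of the access size in bytes) */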
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

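/* the vmcb_seg 'attrib' field packs the descriptor attributes:
   attrib bits 0-7 come from flags bits 8-15 and attrib bits 8-11
   from flags bits 20-23, as reflected in svm_save_seg/svm_load_seg */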
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            cpu_loop_exit();
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

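/* vmload and vmsave transfer the segment registers and MSRs that
   vmrun does not switch: FS, GS, TR, LDTR, the syscall/sysenter MSRs
   and kernel_gs_base */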
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

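/* for MSR intercepts each MSR has two bits (read/write) in the MSR
   permission bitmap; the three architectural MSR ranges are mapped
   to separate 2K byte regions of the bitmap */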
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5065
{
5066
    if (likely(!(env->hflags & HF_SVMI_MASK)))
5067
        return;
5068
    switch(type) {
5069
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5070
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5071
            helper_vmexit(type, param);
5072
        }
5073
        break;
5074
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5075
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5076
            helper_vmexit(type, param);
5077
        }
5078
        break;
5079
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5080
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5081
            helper_vmexit(type, param);
5082
        }
5083
        break;
5084
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5085
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5086
            helper_vmexit(type, param);
5087
        }
5088
        break;
5089
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5090
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5091
            helper_vmexit(type, param);
5092
        }
5093
        break;
5094
    case SVM_EXIT_MSR:
5095
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5096
            /* FIXME: this should be read in at vmrun (faster this way?) */
5097
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5098
            uint32_t t0, t1;
5099
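            /* The MSR permission bitmap holds two bits per MSR (read
               and write) and covers three 8K-MSR ranges: 0..0x1fff,
               0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff.
               Compute t1, the byte offset of the MSR's bits in the
               bitmap, and t0, the bit offset inside that byte. */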
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
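        /* The I/O permission bitmap has one bit per port.  Bits 4..6
           of param encode the access size in bytes, so the mask covers
           every port touched by the access; the 16-bit load copes with
           a check that straddles a byte boundary. */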
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

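    /* Record the guest's interrupt shadow in the VMCB so that it is
       re-armed on the next VMRUN instead of being lost across the
       world switch. */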
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

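    /* Propagate the virtual interrupt state (V_TPR and any pending
       V_IRQ) back into the guest's VMCB. */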
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
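/* MMX instructions reset the FPU stack top and mark every tag-word
   entry as valid, since the MMX registers alias the x87 registers. */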
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

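/* ops_sse.h is included twice: SHIFT 0 instantiates the 64-bit MMX
   variants of the vector helpers, SHIFT 1 the 128-bit SSE variants. */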
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

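/* helper_template.h is instantiated once per operand width: SHIFT
   0/1/2 generate the byte, word and long helpers, SHIFT 3 the quad
   helpers on 64-bit targets. */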
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
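/* Note: the code generated by the translator tests for a zero source
   operand and branches around these helpers in that case; the loops
   below rely on t0 being non-zero to terminate. */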
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

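/* Condition codes are evaluated lazily: CC_OP records which operation
   last set the flags and CC_SRC/CC_DST hold its operands, so the
   EFLAGS bits only have to be computed here when they are actually
   used. */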
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

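/* Same dispatch, but computing only the carry flag; several CC_OP
   values can share a cheaper routine here (e.g. inc/dec never change
   CF, so compute_c_incl just reads back the carry saved in CC_SRC). */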
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}