/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

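/* PF lookup table: parity_table[i] is CC_P when the byte value i contains
   an even number of set bits, 0 otherwise, matching the x86 parity flag. */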
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
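/* Fetch the two 32-bit words of a segment descriptor: bit 2 of the
   selector picks the LDT or GDT, and each 8-byte descriptor entry is
   returned as e1 (low word) and e2 (high word with the attribute bits). */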
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

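/* In the TSS, the privilege-level dpl stack pointer pair lives at offset
   (dpl * 4 + 2) << shift, where shift is 1 for a 32-bit TSS and 0 for a
   16-bit one (bit 3 of the TSS descriptor type). */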
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
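/* Hardware task switch. The fixed 32-bit TSS layout read and written
   below is: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28 the eight GP registers,
   0x48 the six segment selectors, 0x60 the LDT selector; the 16-bit TSS
   uses the corresponding 2-byte slots starting at 0x0e. */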
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
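/* The I/O permission bitmap starts at the 16-bit offset stored at
   TSS + 0x66; each I/O port is one bit, and every bit covered by the
   access (1, 2 or 4 ports) must be clear for the access to be allowed. */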
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

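/* On x86_64, writes to a 32-bit stack pointer must zero-extend into RSP
   (hence the plain assignment below), while 16-bit writes preserve the
   upper bits of the register. */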
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
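/* An inner-privilege interrupt switches to the stack taken from the TSS,
   then pushes (for vm86, the data segment selectors,) SS, ESP, EFLAGS,
   CS, EIP and finally the error code, in 2- or 4-byte slots depending on
   the gate size. */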
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

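/* In the 64-bit TSS, RSP0-RSP2 live at offset 4 + 8 * level and the IST
   pointers follow, so ISTn is fetched by passing level = n + 3. */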
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
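/* Long mode IDT entries are 16 bytes: e1/e2 hold the selector, the low
   offset bits, the gate type and the IST index, and e3 holds bits 63..32
   of the offset. */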
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

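/* SYSCALL/SYSRET: the kernel CS selector comes from STAR[47:32] (SS is
   that selector + 8) and the user return CS selector from STAR[63:48];
   in long mode RCX and R11 carry the return RIP and RFLAGS. */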
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
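/* Exceptions 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are the contributory
   class: two of them in sequence, or a page fault followed by a
   contributory exception or another page fault, escalate to #DF. */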
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

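/* Bit 17 of the SMM revision ID advertises SMBASE relocation support
   (tested in helper_rsm below); the low word is the save state revision,
   which differs between the 32-bit and 64-bit formats. */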
#ifdef TARGET_X86_64
1344
#define SMM_REVISION_ID 0x00020064
1345
#else
1346
#define SMM_REVISION_ID 0x00020000
1347
#endif
1348

    
1349
void do_smm_enter(void)
1350
{
1351
    target_ulong sm_state;
1352
    SegmentCache *dt;
1353
    int i, offset;
1354

    
1355
    if (loglevel & CPU_LOG_INT) {
1356
        fprintf(logfile, "SMM: enter\n");
1357
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1358
    }
1359

    
1360
    env->hflags |= HF_SMM_MASK;
1361
    cpu_smm_update(env);
1362

    
1363
    sm_state = env->smbase + 0x8000;
1364

    
1365
#ifdef TARGET_X86_64
1366
    for(i = 0; i < 6; i++) {
1367
        dt = &env->segs[i];
1368
        offset = 0x7e00 + i * 16;
1369
        stw_phys(sm_state + offset, dt->selector);
1370
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1371
        stl_phys(sm_state + offset + 4, dt->limit);
1372
        stq_phys(sm_state + offset + 8, dt->base);
1373
    }
1374

    
1375
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1376
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1377

    
1378
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1379
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1380
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1381
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1382

    
1383
    stq_phys(sm_state + 0x7e88, env->idt.base);
1384
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1385

    
1386
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1387
    stq_phys(sm_state + 0x7e98, env->tr.base);
1388
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1389
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1390

    
1391
    stq_phys(sm_state + 0x7ed0, env->efer);
1392

    
1393
    stq_phys(sm_state + 0x7ff8, EAX);
1394
    stq_phys(sm_state + 0x7ff0, ECX);
1395
    stq_phys(sm_state + 0x7fe8, EDX);
1396
    stq_phys(sm_state + 0x7fe0, EBX);
1397
    stq_phys(sm_state + 0x7fd8, ESP);
1398
    stq_phys(sm_state + 0x7fd0, EBP);
1399
    stq_phys(sm_state + 0x7fc8, ESI);
1400
    stq_phys(sm_state + 0x7fc0, EDI);
1401
    for(i = 8; i < 16; i++)
1402
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1403
    stq_phys(sm_state + 0x7f78, env->eip);
1404
    stl_phys(sm_state + 0x7f70, compute_eflags());
1405
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1406
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1407

    
1408
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1409
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1410
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1411

    
1412
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1413
    stl_phys(sm_state + 0x7f00, env->smbase);
1414
#else
1415
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1416
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1417
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1418
    stl_phys(sm_state + 0x7ff0, env->eip);
1419
    stl_phys(sm_state + 0x7fec, EDI);
1420
    stl_phys(sm_state + 0x7fe8, ESI);
1421
    stl_phys(sm_state + 0x7fe4, EBP);
1422
    stl_phys(sm_state + 0x7fe0, ESP);
1423
    stl_phys(sm_state + 0x7fdc, EBX);
1424
    stl_phys(sm_state + 0x7fd8, EDX);
1425
    stl_phys(sm_state + 0x7fd4, ECX);
1426
    stl_phys(sm_state + 0x7fd0, EAX);
1427
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1428
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1429

    
1430
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1431
    stl_phys(sm_state + 0x7f64, env->tr.base);
1432
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1433
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1434

    
1435
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1436
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1437
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1438
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1439

    
1440
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1441
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1442

    
1443
    stl_phys(sm_state + 0x7f58, env->idt.base);
1444
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1445

    
1446
    for(i = 0; i < 6; i++) {
1447
        dt = &env->segs[i];
1448
        if (i < 3)
1449
            offset = 0x7f84 + i * 12;
1450
        else
1451
            offset = 0x7f2c + (i - 3) * 12;
1452
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1453
        stl_phys(sm_state + offset + 8, dt->base);
1454
        stl_phys(sm_state + offset + 4, dt->limit);
1455
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1456
    }
1457
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1458

    
1459
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1460
    stl_phys(sm_state + 0x7ef8, env->smbase);
1461
#endif
1462
    /* init SMM cpu state */
1463

    
1464
#ifdef TARGET_X86_64
1465
    env->efer = 0;
1466
    env->hflags &= ~HF_LMA_MASK;
1467
#endif
1468
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1469
    env->eip = 0x00008000;
1470
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1471
                           0xffffffff, 0);
1472
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1473
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1474
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1475
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1476
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1477

    
1478
    cpu_x86_update_cr0(env,
1479
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1480
    cpu_x86_update_cr4(env, 0);
1481
    env->dr[7] = 0x00000400;
1482
    CC_OP = CC_OP_EFLAGS;
1483
}
1484

    
1485
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

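/* Note: on real hardware both a zero divisor and a quotient that does
   not fit in the destination register raise the same #DE fault, which
   is why the helpers below raise EXCP00_DIVZ for overflow as well. */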
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

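/* The ASCII/decimal adjust helpers below (AAM, AAD, AAA, AAS, DAA, DAS)
   correct AL (and AH) after arithmetic on packed or unpacked BCD
   operands; the affected flags are recomputed by hand since the
   values involved are only 8 bits wide. */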
/* XXX: AAM with a base of 0 should raise the #DE exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

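/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; on a match
   store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. */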
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
#  if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

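/* ENTER with a non-zero nesting level: copy the outer frame pointers
   from the old frame and push the new frame pointer (t1), honouring
   the stack segment's address-size mask. */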
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

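/* LLDT and LTR below load the LDT and task registers from a GDT
   descriptor; in long mode these system descriptors are 16 bytes,
   so the high 32 bits of the base are read from the descriptor
   extension. */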
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

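/* Far control transfers: the helpers below implement far JMP, CALL,
   IRET and RET through code segments, call gates, task gates and TSS
   descriptors, applying the protected mode privilege checks. */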
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

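/* On a protected mode return to an outer privilege level, data or
   non-conforming code segments whose DPL is below the new CPL must
   be nullified; validate_seg below implements that check. */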
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

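/* SYSENTER/SYSEXIT: fast system calls. The target CS/SS pairs are
   derived from the SYSENTER_CS MSR (CS, CS+8 on entry; CS+16, CS+24
   on exit); the entry stack and instruction pointers come from the
   SYSENTER_ESP/EIP MSRs, while SYSEXIT takes them from ECX/EDX. */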
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

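/* Control register writes must go through the cpu_x86_update_cr*
   functions so that paging state, hflags and the TLB stay consistent
   with the new value. */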
void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

#if !defined(CONFIG_USER_ONLY)
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

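/* RDTSC is privileged when CR4.TSD is set; executed outside CPL 0 it
   then raises #GP. */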
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

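/* MSR access: user mode emulation has no machine MSRs, so WRMSR and
   RDMSR are stubs there; the system emulation versions dispatch on
   the MSR index in ECX. */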
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

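/* LSL, LAR, VERR and VERW do not fault on an invalid selector; they
   report success or failure through ZF, so the helpers below set or
   clear CC_Z in the saved flags instead of raising an exception. */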
uint32_t helper_lsl(uint32_t selector)
{
    unsigned int limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

uint32_t helper_lar(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

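/* floatx_compare returns -1 (less), 0 (equal), 1 (greater) or
   2 (unordered); indexed with ret + 1, these tables give the
   corresponding C3/C2/C0 status bits (resp. ZF/PF/CF for fcomi) */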
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

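/* fbld/fbst use the x87 packed BCD format: bytes 0-8 hold 18 decimal
   digits, two per byte (low nibble = low digit), and bit 7 of byte 9
   is the sign */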
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

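/* fprem1/fprem compute the partial remainder ST0 mod ST1 (IEEE
   round-to-nearest quotient for fprem1, truncating quotient for
   fprem).  When the exponent difference is 53 or more, only a
   partial reduction is done and C2 is set so the instruction is
   expected to be restarted */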
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

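/* fstenv/fldenv use the 2-bit x87 tag encoding: 0 = valid, 1 = zero,
   2 = special (NaN, infinity, denormal), 3 = empty.  Internally only
   an empty/non-empty flag is kept per register, so the full encoding
   is recomputed from the register contents on save */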
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

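/* fxsave/fxrstor use the FXSAVE area layout: control/status/tag words
   at the start, the eight FP registers at offset 0x20 (16 bytes
   apart), and the XMM registers at offset 0xa0.  The tag word is
   stored in its abridged one-bit-per-register form */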
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

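/* conversion between the guest 80-bit extended format and the host
   representation: when the host has no x86 long double, mantissa and
   exponent are repacked to/from an IEEE double (losing precision);
   otherwise the 80-bit value is copied bit for bit */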
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

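/* div64 divides the 128-bit value in *phigh:*plow by b.  The fast
   path handles a 64-bit dividend directly; otherwise a bit-by-bit
   restoring division shifts the quotient into a0 while the remainder
   accumulates in a1 */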
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

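/* repack the segment attribute bits between the compressed VMCB
   layout and the descriptor (e2) word used by the CPU emulation;
   the cpu-side word also carries base and limit fragments at their
   architectural positions */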
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmmcall!\n");
}

void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

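/* check a single intercept: the CR/DR/exception ranges are tested
   against the per-bit intercept masks; MSR accesses are looked up in
   the MSR permission bitmap (two bits per MSR, read intercept then
   write intercept) before deciding to #VMEXIT */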
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_IOIO:
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

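/* I/O intercepts: look the port up in the I/O permission bitmap; a
   multi-byte access is intercepted if any of its bytes is.  On exit,
   exit_info_2 holds the address of the instruction following IN/OUT */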
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

5147
    helper_clgi();
5148
    /* FIXME: Resets the current ASID register to zero (host ASID). */
5149

    
5150
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5151

    
5152
    /* Clears the TSC_OFFSET inside the processor. */
5153

    
5154
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
5155
       from the page table indicated the host's CR3. If the PDPEs contain
5156
       illegal state, the processor causes a shutdown. */
5157

    
5158
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5159
    env->cr[0] |= CR0_PE_MASK;
5160
    env->eflags &= ~VM_MASK;
5161

    
5162
    /* Disables all breakpoints in the host DR7 register. */
5163

    
5164
    /* Checks the reloaded host state for consistency. */
5165

    
5166
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5167
       host's code segment or non-canonical (in the case of long mode), a
5168
       #GP fault is delivered inside the host.) */
5169

    
5170
    /* remove any pending exception */
5171
    env->exception_index = -1;
5172
    env->error_code = 0;
5173
    env->old_exception = -1;
5174

    
5175
    cpu_loop_exit();
5176
}
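
/* A minimal sketch (not built, hence the #if 0, following this file's own
   convention for such snippets) of how the exit record stored just above
   could be read back, e.g. from a debug hook; it reuses the same vmcb
   offsets and physical-memory accessors as helper_vmexit().  The function
   name is hypothetical. */
#if 0
static void svm_dump_last_exit(void)
{
    uint64_t code  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code));
    uint64_t info1 = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1));
    uint64_t info2 = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2));
    if (logfile)
        fprintf(logfile, "last vmexit: code=%016" PRIx64 " info1=%016" PRIx64
                " info2=%016" PRIx64 "\n", code, info1, info2);
}
#endif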

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
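
/* The two 32-bit stores above set all eight fptags bytes at once: 0x00
   marks a register as valid (MMX entry), 0x01 as empty (EMMS).  A byte-wise
   sketch of the same effect (illustrative helper, not used by the
   translator): */
#if 0
static void set_all_fp_tags(uint8_t tag)
{
    int i;

    for (i = 0; i < 8; i++)
        env->fptags[i] = tag;   /* 0 = valid, 1 = empty */
}
#endif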

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
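
/* The repeated includes above are QEMU's size-template idiom: the template
   header derives the operand width, data type and helper-name suffix from
   SHIFT, so the same source is expanded once per operand size (8/16/32
   bits, plus 64 under TARGET_X86_64).  A minimal sketch of the pattern
   (illustrative macro values, not the real template contents): */
#if 0
/* in a template header, included with SHIFT predefined: */
#define DATA_BITS (8 << SHIFT)          /* 0 -> 8, 1 -> 16, 2 -> 32, 3 -> 64 */
#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
#endif
/* ... one helper per size is then emitted via glue(helper_op, SUFFIX) ... */
#endif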

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    /* undefined for t0 == 0: the loop below would never terminate */
    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    /* undefined for t0 == 0: the loop below would never terminate */
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
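
/* x86 leaves the bsf/bsr destination undefined and sets ZF when the source
   is zero, so the translator is expected to handle that case before calling
   these helpers.  Assuming the ctz64/clz64 inlines from host-utils.h are
   available, an equivalent loop-free sketch would be: */
#if 0
target_ulong helper_bsf_alt(target_ulong t0)
{
    return ctz64(t0);                   /* index of lowest set bit */
}

target_ulong helper_bsr_alt(target_ulong t0)
{
    return 63 - clz64(t0);              /* index of highest set bit */
}
#endif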

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};
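
/* cc_table is the core of QEMU's lazy condition-code scheme: the translator
   records the last flag-setting operation's operands in CC_SRC/CC_DST and
   its kind in CC_OP, and EFLAGS bits are only materialized on demand
   through the {compute_all, compute_c} pairs above (compute_eflags() in
   helper_vmexit is presumably built on the same mechanism).  A sketch of
   how a consumer uses the table (illustrative wrappers only): */
#if 0
static int current_eflags_bits(void)
{
    return cc_table[CC_OP].compute_all();   /* O/S/Z/A/P/C, all at once */
}

static int current_carry(void)
{
    return cc_table[CC_OP].compute_c();     /* CF only, cheaper when enough */
}
#endif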