/* target-i386 / op_helper.c @ revision 2436b61a */

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

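/* PF is computed from the low 8 bits of a result: the table below
   yields CC_P for byte values with an even number of set bits and 0
   otherwise, so the parity flag is obtained with a single lookup. */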
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

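/* RCL rotates through CF, so an N-bit operand is effectively rotated
   inside an (N + 1)-bit quantity: shift counts are reduced modulo 17
   for 16-bit operands and modulo 9 for 8-bit operands, which is what
   the two tables below precompute. */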
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

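/* x87 constants, in the order loaded by the FPU constant instructions
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */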
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

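/* A segment descriptor is 8 bytes: throughout this file, e1 is its low
   32-bit word (limit 15:0 and base 15:0) and e2 its high word (base
   23:16, type and flag bits, limit 19:16, base 31:24).  Bit 2 of a
   selector chooses between the LDT and the GDT. */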
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

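/* A 32-bit TSS stores the ring-n entry stack at offset 4 + 8 * n (ESP
   first, then the SS selector); a 16-bit TSS packs SS:SP into 16-bit
   fields starting at offset 2 + 4 * n.  The shift computed from the
   TSS type selects between the two layouts below. */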
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

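/* Hardware task switch: save the current CPU state into the old TSS,
   then load the complete new context (registers, segments, LDT, CR3)
   from the new one.  The busy bit of the old TSS descriptor is cleared
   for JMP and IRET, the busy bit of the new one is set for JMP and
   CALL, and a CALL additionally links the new TSS back to the old task
   and sets NT so that a later IRET can return across the switch. */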
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if it is a task gate, read and load the TSS segment */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can cause problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load all registers without raising exceptions, then reload
       them, possibly raising exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

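/* The I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of a 32-bit TSS and holds one bit per I/O port; an access is
   allowed only if every bit covering the accessed ports is zero.
   Reading 16 bits starting at the byte that contains the first port
   covers any 1, 2 or 4 byte access with a single lduw. */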
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

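/* ESP updates must respect the stack-size attribute of SS: with a
   16-bit stack only the low 16 bits of ESP may change.  On x86_64 the
   32-bit case must also zero-extend, hence the three-way macro. */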
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit targets this segment addition can overflow, so this macro
 * can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

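/* Protected-mode interrupt and exception entry.  A task gate triggers
   a full task switch; the other gate types build a stack frame: when
   switching to an inner privilege level the old SS:ESP is pushed first
   (preceded by GS, FS, DS and ES when leaving vm86 mode), then EFLAGS,
   CS:EIP and, for some exceptions, an error code. */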
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* this check must be done here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

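/* In long mode IDT entries are 16 bytes: e3 holds bits 63:32 of the
   target RIP and the low 3 bits of e2 select an entry of the Interrupt
   Stack Table in the 64-bit TSS.  A non-zero IST forces a stack switch
   even when no privilege change occurs. */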
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

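/* SYSCALL: CS comes from bits 47:32 of the STAR MSR (SS is that
   selector + 8).  In long mode the return RIP is saved in RCX, EFLAGS
   in R11, RFLAGS is masked with SFMASK (env->fmask) and execution
   continues at LSTAR (64-bit callers) or CSTAR (compatibility mode);
   outside long mode the target EIP is the low 32 bits of STAR. */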
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

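/* SYSRET returns to CPL 3 using bits 63:48 of STAR: CS is that
   selector + 16 for a 64-bit return, the selector itself for a 32-bit
   one, and SS is the selector + 8.  In long mode EFLAGS is restored
   from R11. */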
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

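/* In real mode the table at the IDT base is the IVT: 4-byte entries
   holding a 16-bit offset followed by a 16-bit segment.  Only FLAGS,
   CS and IP are pushed, on a 16-bit stack. */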
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than exit
       the emulation with the appropriate exception and error code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

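/* Entering SMM saves the CPU state into the state save area at
   SMBASE + 0x8000 (the layout differs between the legacy 32-bit and
   the AMD64 revision of the format) and restarts execution at
   SMBASE + 0x8000 with CS.base = SMBASE.  RSM restores the saved state
   and may relocate SMBASE when bit 17 of the revision ID advertises
   that capability. */
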
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

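/* the DIV/IDIV helpers below write quotient and remainder back to
   EAX/EDX (or AL/AH for the byte forms) and raise #DE both for a
   zero divisor and for a quotient that overflows the destination. */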
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
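/* AAM/AAD take an arbitrary imm8 base; note that AAM with a base of
   0 should raise #DE but is not checked here (hence the XXX above). */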
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

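/* ASCII adjust after addition/subtraction: fix up AL as an unpacked
   BCD digit, propagating a carry/borrow into AH and setting AF/CF. */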
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

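/* decimal adjust after addition/subtraction: fix up AL as a packed
   BCD value; ZF, PF and SF are then recomputed by hand below. */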
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

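/* CMPXCHG8B: compare EDX:EAX with the 64 bit memory operand; on a
   match store ECX:EBX and set ZF, else load the old value into
   EDX:EAX. As on real hardware the memory operand is written back
   even when the comparison fails. */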
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

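/* CPUID: out of range leaves are clamped to the highest supported
   basic leaf, as on real CPUs, before being dispatched below. */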
void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (ECX) {
            case 0: /* L1 dcache info */
                EAX = 0x0000121;
                EBX = 0x1c0003f;
                ECX = 0x000003f;
                EDX = 0x0000001;
                break;
            case 1: /* L1 icache info */
                EAX = 0x0000122;
                EBX = 0x1c0003f;
                ECX = 0x000003f;
                EDX = 0x0000001;
                break;
            case 2: /* L2 cache info */
                EAX = 0x0000143;
                EBX = 0x3c0003f;
                ECX = 0x0000fff;
                EDX = 0x0000001;
                break;
            default: /* end of info */
                EAX = 0;
                EBX = 0;
                ECX = 0;
                EDX = 0;
                break;
        }

        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        EAX = 0; /* Smallest monitor-line size in bytes */
        EBX = 0; /* Largest monitor-line size in bytes */
        ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            EAX = 0x00000020;        /* 32 bits physical */
#else
            EAX = 0x00000024;        /* 36 bits physical */
#endif
        }
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

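/* handle the nesting level of ENTER: copy the enclosing frame
   pointers from the old frame, then push the new frame pointer
   value passed in t1. */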
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

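/* load the LDT register: the selector must point into the GDT at a
   present LDT descriptor (16 bytes in long mode, 8 otherwise). */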
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

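/* load the task register: same descriptor checks as LLDT, but the
   descriptor must be an available TSS, which is then marked busy. */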
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
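/* Direct calls only need the privilege checks below; calls through
   a call gate may in addition switch to the inner stack taken from
   the TSS, copying param_count stack words across, before CS:EIP is
   pushed. */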
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
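/* common code for LRET and IRET: pop CS:EIP (plus EFLAGS for IRET);
   when returning to an outer privilege level, also pop SS:ESP and
   re-validate the data segment registers. */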
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

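/* SYSENTER: enter CPL 0 with flat segments derived from the
   SYSENTER_CS/ESP/EIP MSRs; raises #GP(0) if SYSENTER_CS is null. */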
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

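/* SYSEXIT: return to CPL 3 with ESP and EIP taken from ECX and EDX. */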
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

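/* control register access: stubbed out in user mode emulation.
   Otherwise CR8 maps to the APIC TPR (or to V_TPR while SVM virtual
   interrupt masking is active) and CR0/CR3/CR4 writes go through
   the cpu_x86_update_cr* functions. */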
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

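/* RDTSC: raises #GP if CR4.TSD is set and CPL != 0; the result
   includes the SVM tsc_offset of the current guest. */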
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

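/* MSR access: stubbed out in user mode emulation. For WRMSR, only
   the EFER bits backed by CPUID feature flags may be modified;
   unknown MSRs are silently ignored (see the XXX cases below). */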
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

target_ulong helper_lsl(target_ulong selector1)
3260
{
3261
    unsigned int limit;
3262
    uint32_t e1, e2, eflags, selector;
3263
    int rpl, dpl, cpl, type;
3264

    
3265
    selector = selector1 & 0xffff;
3266
    eflags = cc_table[CC_OP].compute_all();
3267
    if (load_segment(&e1, &e2, selector) != 0)
3268
        goto fail;
3269
    rpl = selector & 3;
3270
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3271
    cpl = env->hflags & HF_CPL_MASK;
3272
    if (e2 & DESC_S_MASK) {
3273
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3274
            /* conforming */
3275
        } else {
3276
            if (dpl < cpl || dpl < rpl)
3277
                goto fail;
3278
        }
3279
    } else {
3280
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
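        /* system descriptors: only the TSS types (available/busy,
           16 and 32 bit) and the LDT have a limit that lsl can report */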
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
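        /* lar additionally accepts gate descriptors: 16/32-bit call
           gates (4, 12) and task gates (5), besides the TSS and LDT
           types allowed for lsl */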
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
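/* indexed by floatx_compare() result + 1: less, equal, greater and
   unordered map to C0, C3, no flags and C0|C2|C3 in the status word */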

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
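/* same indexing for fcomi/fucomi, which set EFLAGS instead:
   CF, ZF, no flags, ZF|PF|CF */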

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

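/* the status word image returned to the guest carries the current
   top-of-stack pointer (fpstt) in bits 11..13 */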
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
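    /* bits 8..9 of the control word select the precision: 0 = single
       (32 bit), 2 = double (64 bit), 3 = extended (80 bit); the
       reserved value 1 falls through to extended here */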
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

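/* fbld reads a ten-byte packed-BCD operand: bytes 0..8 hold 18 BCD
   digits, two per byte with the low digit in the low nibble, and
   bit 7 of byte 9 is the sign */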
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

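    /* the x87 registers live at offset 0x20 of the fxsave area, one
       80-bit value per 16-byte slot */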
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

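/* two's-complement negation of a 128-bit value: invert both halves
   and add 1, letting add128() propagate the carry */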
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
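        /* plain shift-and-subtract division: each of the 64 iterations
           shifts the 128-bit dividend left one bit, subtracts b from
           the high half whenever it fits, and shifts the resulting
           quotient bit into a0 */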
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
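    /* the VMCB keeps segment attributes in a packed 12-bit form:
       descriptor bits 8..15 in the low byte and bits 20..23 (AVL, L,
       D/B, G) in bits 8..11, which the shifts below produce from the
       QEMU flags word */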
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
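    /* with V_INTR_MASKING the guest runs with its own virtual TPR and
       the host's EFLAGS.IF is remembered in HF2_HIF, so physical
       interrupt masking stays under host control */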
4961
    if (int_ctl & V_INTR_MASKING_MASK) {
4962
        env->v_tpr = int_ctl & V_TPR_MASK;
4963
        env->hflags2 |= HF2_VINTR_MASK;
4964
        if (env->eflags & IF_MASK)
4965
            env->hflags2 |= HF2_HIF_MASK;
4966
    }
4967

    
4968
    cpu_load_efer(env, 
4969
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4970
    env->eflags = 0;
4971
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4972
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4973
    CC_OP = CC_OP_EFLAGS;
4974

    
4975
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4976
                       env, R_ES);
4977
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4978
                       env, R_CS);
4979
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4980
                       env, R_SS);
4981
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4982
                       env, R_DS);
4983

    
4984
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4985
    env->eip = EIP;
4986
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4987
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4988
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4989
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4990
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4991

    
4992
    /* FIXME: guest state consistency checks */
4993

    
4994
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4995
        case TLB_CONTROL_DO_NOTHING:
4996
            break;
4997
        case TLB_CONTROL_FLUSH_ALL_ASID:
4998
            /* FIXME: this is not 100% correct but should work for now */
4999
            tlb_flush(env, 1);
5000
        break;
5001
    }
5002

    
5003
    env->hflags2 |= HF2_GIF_MASK;
5004

    
5005
    if (int_ctl & V_IRQ_MASK) {
5006
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5007
    }
5008

    
5009
    /* maybe we need to inject an event */
5010
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5011
    if (event_inj & SVM_EVTINJ_VALID) {
5012
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5013
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5014
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5015
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
5016

    
5017
        if (loglevel & CPU_LOG_TB_IN_ASM)
5018
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
5019
        /* FIXME: need to implement valid_err */
5020
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5021
        case SVM_EVTINJ_TYPE_INTR:
5022
                env->exception_index = vector;
5023
                env->error_code = event_inj_err;
5024
                env->exception_is_int = 0;
5025
                env->exception_next_eip = -1;
5026
                if (loglevel & CPU_LOG_TB_IN_ASM)
5027
                    fprintf(logfile, "INTR");
5028
                /* XXX: is it always correct ? */
5029
                do_interrupt(vector, 0, 0, 0, 1);
5030
                break;
5031
        case SVM_EVTINJ_TYPE_NMI:
5032
                env->exception_index = EXCP02_NMI;
5033
                env->error_code = event_inj_err;
5034
                env->exception_is_int = 0;
5035
                env->exception_next_eip = EIP;
5036
                if (loglevel & CPU_LOG_TB_IN_ASM)
5037
                    fprintf(logfile, "NMI");
5038
                cpu_loop_exit();
5039
                break;
5040
        case SVM_EVTINJ_TYPE_EXEPT:
5041
                env->exception_index = vector;
5042
                env->error_code = event_inj_err;
5043
                env->exception_is_int = 0;
5044
                env->exception_next_eip = -1;
5045
                if (loglevel & CPU_LOG_TB_IN_ASM)
5046
                    fprintf(logfile, "EXEPT");
5047
                cpu_loop_exit();
5048
                break;
5049
        case SVM_EVTINJ_TYPE_SOFT:
5050
                env->exception_index = vector;
5051
                env->error_code = event_inj_err;
5052
                env->exception_is_int = 1;
5053
                env->exception_next_eip = EIP;
5054
                if (loglevel & CPU_LOG_TB_IN_ASM)
5055
                    fprintf(logfile, "SOFT");
5056
                cpu_loop_exit();
5057
                break;
5058
        }
5059
        if (loglevel & CPU_LOG_TB_IN_ASM)
5060
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
5061
    }
5062
}
5063

    
5064
void helper_vmmcall(void)
5065
{
5066
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5067
    raise_exception(EXCP06_ILLOP);
5068
}
5069

    
5070
void helper_vmload(int aflag)
5071
{
5072
    target_ulong addr;
5073
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5074

    
5075
    if (aflag == 2)
5076
        addr = EAX;
5077
    else
5078
        addr = (uint32_t)EAX;
5079

    
5080
    if (loglevel & CPU_LOG_TB_IN_ASM)
5081
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5082
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5083
                env->segs[R_FS].base);
5084

    
5085
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5086
                       env, R_FS);
5087
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5088
                       env, R_GS);
5089
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5090
                 &env->tr);
5091
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5092
                 &env->ldt);
5093

    
5094
#ifdef TARGET_X86_64
5095
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5096
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5097
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5098
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5099
#endif
5100
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5101
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5102
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5103
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5104
}
5105

    
5106
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

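/* STGI and CLGI set and clear the global interrupt flag (GIF).  While
   GIF is clear, interrupts and most other external events are held
   pending. */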
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

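/* INVLPGA invalidates the TLB mapping for the virtual address in rAX;
   the ASID in ECX is currently ignored (see the XXX below). */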
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

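/* Check whether the given intercept is enabled for the running guest and
   raise a #VMEXIT with 'param' as exit_info_1 if it is.  MSR accesses go
   through the MSR permission bitmap: two bits per MSR (read and write),
   with the MSR ranges 0-0x1fff, 0xc0000000-0xc0001fff and
   0xc0010000-0xc0011fff mapped onto three consecutive 2 KB regions. */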
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two bits per MSR, so the byte offset is ECX * 2 / 8 */
                t0 = ECX * 2;
                t1 = t0 / 8;
                t0 %= 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

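/* Check an I/O access against the I/O permission map (one bit per port).
   Bits 4-6 of 'param' encode the access size in bytes, so a multi-byte
   access is checked against every port it touches. */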
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

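/* #VMEXIT: save the guest state back into the VMCB, reload the host
   state from the host save area (env->vm_hsave), record exit_code and
   exit_info_1, and return to the host through cpu_loop_exit(). */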
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
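/* MMX aliases the x87 register file: entering MMX mode resets the stack
   top and marks all eight tags valid (0), while EMMS marks them all
   empty (1). */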
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

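/* ops_sse.h and helper_template.h are multiple-inclusion templates: each
   inclusion instantiates helpers for the operand width selected by SHIFT
   (for ops_sse.h, 0 = MMX, 1 = SSE; for helper_template.h, 0..3 = the
   byte/word/long/quad variants). */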
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
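/* Both scans rely on a non-zero operand to terminate; the translated
   code is expected to handle the zero case (and ZF) before calling
   these helpers. */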
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

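/* With CC_OP_EFLAGS the flags are already materialized in CC_SRC, so
   "compute all" and "compute C" simply read them back. */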
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

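/* Lazy condition codes: cc_table maps each CC_OP (the operation that
   last set the flags) to a pair of helpers, one recomputing the full
   flag set and one recomputing only CF, the common fast path. */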
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};