/* target-i386/op_helper.c @ e65bdffa */

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
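
/* PF is set when the low byte of a result has an even number of 1
   bits.  parity_table is indexed by that byte: e.g. parity_table[0x03]
   == CC_P (two bits set) but parity_table[0x07] == 0 (three bits
   set). */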

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
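
/* RCL rotates through CF, so a 16-bit rotate repeats every 17 steps
   and an 8-bit one every 9.  These tables reduce a 5-bit rotate count
   accordingly: e.g. rclw_table[17] == 0 and rclb_table[9] == 0. */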

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
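
/* e1/e2 are the two 32-bit halves of a descriptor.  The base is
   scattered across e1[31:16], e2[7:0] and e2[31:24]; the 20-bit limit
   across e1[15:0] and e2[19:16].  With the G bit set the limit counts
   4K pages, so e.g. a raw limit of 0xfff with G=1 decodes to
   0xffffff. */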

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
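
/* A 32-bit TSS stores the ring 0-2 stack pointers as ESP/SS pairs of
   8 bytes starting at offset 4; a 16-bit TSS packs them as SP/SS word
   pairs starting at offset 2.  The high bit of the 4-bit descriptor
   type selects the shift used above. */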

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
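
/* switch_tss() implements a hardware task switch: validate the new
   TSS descriptor, save the current register state into the old TSS,
   update the busy bits (JMP and IRET clear the old task's, JMP and
   CALL set the new one's), then reload CR3, EFLAGS, the general
   registers, the LDT and finally the segment registers from the new
   TSS. */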

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been
       done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16-bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
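
/* The I/O permission bitmap starts at the 16-bit offset stored at TSS
   offset 0x66.  Each port maps to one bit, and an N-byte access is
   allowed only if all N bits are clear, so e.g. a word access to port
   0x3f9 tests the bits for ports 0x3f9 and 0x3fa. */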

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
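
/* SET_ESP updates only the ESP bits selected by sp_mask, so a 16-bit
   stack segment leaves the high half of ESP untouched.  The x86_64
   variant special-cases the 32-bit mask (zero-extend into RSP) and
   falls through to a full 64-bit store otherwise. */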

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
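
/* Protected-mode interrupt/exception delivery: fetch the gate from
   the IDT, check gate and target code segment privileges, optionally
   switch to an inner-ring stack taken from the TSS, push the old
   SS:ESP (if the stack changed), EFLAGS, CS:EIP and, for some
   vectors, an error code, then jump to the handler.  Task gates are
   routed through switch_tss() instead. */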

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
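
/* In long mode IDT entries are 16 bytes: a third word (e3) holds bits
   63:32 of the handler address, and bits 2:0 of e2 select an optional
   Interrupt Stack Table slot in the 64-bit TSS. */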

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
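
/* SYSCALL/SYSRET take their selectors from the STAR MSR: bits 47:32
   hold the SYSCALL CS (with SS loaded as that value + 8) and bits
   63:48 the SYSRET CS base.  In long mode the entry point comes from
   LSTAR (64-bit callers) or CSTAR (compatibility mode), and the
   RFLAGS bits listed in FMASK are cleared on entry. */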

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
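
/* In real mode the IDT base points at the interrupt vector table:
   4 bytes per vector, the 16-bit handler offset first, then the
   segment, so vector N lives at IVT base + N * 4. */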

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
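
/* Double-fault rules: two contributory exceptions (#DE or vectors
   10-13) combine into #DF, as does a contributory exception or #PF
   raised while a #PF is being delivered.  A further fault on top of
   #DF is a triple fault, which aborts emulation here where real
   hardware would reset. */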

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
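
/* On SMI entry the CPU state is saved into the SMRAM state save area,
   the region below smbase + 0x10000 addressed here as offsets from
   sm_state = smbase + 0x8000; RSM restores it from the same layout.
   The 32-bit and 64-bit (AMD64) save maps differ, hence the two
   variants below. */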

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */
1582

    
1583

    
1584
/* division, flags are undefined */
1585

    
1586
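/* DIV r/m8: AL = AX / src, AH = AX % src. #DE is raised both for a
   zero divisor and for a quotient that does not fit in the result
   register: e.g. AX=0x0105 (261) / 2 gives AL=0x82 (130), AH=1, while
   AX=0x0200 (512) / 2 overflows since 256 > 0xff. The wider variants
   below follow the same pattern with DX:AX and EDX:EAX. */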
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
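/* AAM: split AL into base-'base' digits: AH = AL / base, AL = AL % base.
   The immediate base is normally 10, e.g. AL=0x2f (47) gives AH=4, AL=7.
   A real CPU raises #DE for a zero base; that case is still missing
   here (see the XXX above). */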
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

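/* CMPXCHG8B: compare EDX:EAX with the 64 bit memory operand. On a
   match the operand is replaced by ECX:EBX and ZF is set, otherwise
   the old value is loaded into EDX:EAX and ZF is cleared. The store
   happens in both cases, mirroring the unconditional write cycle of
   the real instruction. */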
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

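/* CPUID: EAX selects the leaf; out of range basic and extended leaves
   are clamped to the highest supported basic leaf before dispatch. */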
void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (ECX) {
            case 0: /* L1 dcache info */
                EAX = 0x0000121;
                EBX = 0x1c0003f;
                ECX = 0x000003f;
                EDX = 0x0000001;
                break;
            case 1: /* L1 icache info */
                EAX = 0x0000122;
                EBX = 0x1c0003f;
                ECX = 0x000003f;
                EDX = 0x0000001;
                break;
            case 2: /* L2 cache info */
                EAX = 0x0000143;
                EBX = 0x3c0003f;
                ECX = 0x0000fff;
                EDX = 0x0000001;
                break;
            default: /* end of info */
                EAX = 0;
                EBX = 0;
                ECX = 0;
                EDX = 0;
                break;
        }

        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        EAX = 0; /* Smallest monitor-line size in bytes */
        EBX = 0; /* Largest monitor-line size in bytes */
        ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        EDX = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            EAX = 0x00000020;        /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                EAX = 0x00000024; /* 36 bits physical */
            else
                EAX = 0x00000020; /* 32 bits physical */
#endif
        }
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001; /* SVM Revision */
        EBX = 0x00000010; /* nr of ASIDs */
        ECX = 0;
        EDX = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

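/* ENTER with a non zero nesting level: copy level-1 saved frame
   pointers from the old frame, then push the new frame pointer t1.
   All stack accesses are masked with the SS size, as usual. */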
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

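/* LLDT: note that in long mode, system segment descriptors are 16
   bytes wide (base bits 63..32 sit in the third word), hence
   entry_limit is 15 instead of 7 below. LTR uses the same layout. */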
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
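/* A direct far jump loads CS from a code segment descriptor; a jump
   through a task or call gate fetches the real target from the gate
   and re-checks privilege against the gate DPL. */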
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
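/* A call gate to a more privileged level switches to the stack given
   by the TSS for the target DPL and copies param_count entries (words
   for 286 gates, dwords for 386 gates) from the old stack. */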
void helper_lcall_protected(int new_cs, target_ulong new_eip, 
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
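/* Pops EIP, CS and FLAGS from the stack. In vm86 mode IOPL is
   excluded from the writable flags so that the virtual 8086 monitor
   keeps control of it. */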
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
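/* Common code for IRET and LRET: pop EIP/CS (plus EFLAGS for IRET),
   and when returning to an outer privilege level also pop ESP/SS and
   invalidate data segment registers that the outer level may not
   access. */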
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

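/* SYSENTER: enter CPL 0 with flat segments derived from the
   SYSENTER_CS MSR (SS is implicitly SYSENTER_CS + 8); ESP and EIP
   come from the SYSENTER_ESP/EIP MSRs. */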
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

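/* SYSEXIT: return to CPL 3. The target CS/SS are at fixed offsets
   from SYSENTER_CS (+16/+24 in legacy mode, +32/+40 for a 64 bit
   return), with the new ESP taken from ECX and the new EIP from EDX. */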
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

/* XXX: do more */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

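/* RDTSC faults with #GP when CR4.TSD is set and CPL != 0; the counter
   value, including any SVM TSC offset, is returned in EDX:EAX. */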
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
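/* WRMSR/RDMSR move EDX:EAX to/from the MSR selected by ECX. Writes to
   EFER are masked so that only bits backed by CPUID features can be
   changed. */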
void helper_wrmsr(void)
3135
{
3136
    uint64_t val;
3137

    
3138
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3139

    
3140
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3141

    
3142
    switch((uint32_t)ECX) {
3143
    case MSR_IA32_SYSENTER_CS:
3144
        env->sysenter_cs = val & 0xffff;
3145
        break;
3146
    case MSR_IA32_SYSENTER_ESP:
3147
        env->sysenter_esp = val;
3148
        break;
3149
    case MSR_IA32_SYSENTER_EIP:
3150
        env->sysenter_eip = val;
3151
        break;
3152
    case MSR_IA32_APICBASE:
3153
        cpu_set_apic_base(env, val);
3154
        break;
3155
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

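/* Note on LSL/LAR/VERR/VERW below: these instructions never fault on a
   bad selector.  They recompute EFLAGS and report success through ZF
   (CC_Z in CC_SRC); on any failed check the Z bit is cleared and the
   LSL/LAR helpers return 0. */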
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

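/* Note: FPUC_EM holds the six exception mask bits of the control word.
   fpu_set_exception() records a fault in the status word and, if the
   exception is unmasked, also raises the summary (SE) and busy (B)
   bits; the fault is then delivered by the next fwait, either as #MF
   (CR0.NE set) or through the external FERR line. */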
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

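/* The load helpers below reinterpret the raw IEEE bit pattern through a
   union, convert it to the internal CPU86_LDouble format, and push onto
   the x87 stack by decrementing the top-of-stack index modulo 8 and
   clearing the new slot's empty tag. */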
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

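/* In the integer store helpers below, a result that does not fit the
   destination width is replaced by the "integer indefinite" value
   (-32768, i.e. 0x8000, for the 16-bit forms), as real hardware does
   for a masked invalid-operation fault. */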
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

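/* The comparison tables below are indexed by the softfloat result plus
   one: float_relation_less (-1), equal (0), greater (1) and unordered
   (2) map to indices 0..3.  fcom_ccval packs the x87 C3/C2/C0 status
   bits; fcomi_ccval holds the equivalent ZF/PF/CF pattern written to
   EFLAGS by FCOMI/FUCOMI. */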
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

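/* FNSTSW assembles the architectural status word: the stored condition
   bits come from env->fpus, while the three-bit top-of-stack pointer
   (env->fpstt) is inserted into bits 11-13. */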
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

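/* update_fp_status() translates the control word's rounding-control
   field (RC, bits 10-11) into a softfloat rounding mode and, when an
   80-bit float type is available, maps the precision-control field
   (bits 8-9) to a 32-, 64- or 80-bit rounding precision. */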
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

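/* FBLD/FBST operate on the packed-BCD memory format: 18 decimal digits
   stored two per byte (low digit in the low nibble) in bytes 0-8, with
   the sign carried in bit 7 of byte 9. */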
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

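/* The transcendental helpers below are implemented with the host libm
   (pow, log, tan, atan2, ...), so their results carry the host's
   precision rather than bit-exact 80-bit x87 behaviour. */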
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

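/* FPREM/FPREM1 below perform only a partial reduction: when the
   exponent difference is 53 or more, C2 is set and ST0 holds a
   partially reduced value, so software is expected to repeat the
   instruction until C2 clears.  On completion the low three quotient
   bits are reported in C0, C3 and C1. */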
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

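/* FXAM classifies ST0 through the condition codes: C0 set means NaN,
   C0|C2 infinity, C2 a normal finite number, C3 zero and C3|C2 a
   denormal; the sign bit is mirrored into C1. */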
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

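/* The environment image written by FSTENV uses the full two-bit tag
   encoding per register: 00 valid, 01 zero, 10 special (NaN, infinity
   or denormal), 11 empty.  QEMU only tracks an empty/non-empty flag in
   env->fptags, so the finer classes are recomputed from the register
   contents below. */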
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

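/* In FSAVE/FRSTOR the register image follows the environment block;
   "ptr += 14 << data32" skips that block, which is 28 bytes in 32-bit
   mode and 14 bytes in 16-bit mode. */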
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

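/* FXSAVE stores the abridged one-byte tag word in which a set bit
   means "valid"; env->fptags uses 1 for "empty", hence the XOR with
   0xff in both directions below. */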
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

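/* When the host lacks a native 80-bit type, the conversions below
   rebias the exponent between the double format (bias EXPBIAS) and the
   15-bit extended format (bias 16383), and shift the 52-bit mantissa up
   by 11 bits, making the extended format's explicit integer bit
   (bit 63) visible. */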
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

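/* div64() divides a 128-bit value by a 64-bit divisor with a classic
   restoring shift-subtract loop: each of the 64 iterations shifts the
   128-bit dividend left one bit, subtracts b from the high half when
   possible, and shifts the resulting quotient bit into the low part.
   If the high half is already >= b, the quotient cannot fit in 64 bits
   and overflow is reported. */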
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

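/* For the 64-bit multiplies below, CC_DST receives the low half of the
   product and CC_SRC the information needed for CF/OF: the full high
   half for MUL, or a flag telling whether the high half differs from
   the sign extension of the low half for IMUL. */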
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

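/* HLT must advance EIP past the instruction before halting, so that
   execution resumes at the following instruction when an interrupt
   wakes the CPU; helper_hlt() and helper_mwait() below do this with
   next_eip_addend before entering the halted state. */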
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

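/* Each inclusion of softmmu_template.h instantiates the load/store
   helpers for one access size: SHIFT n generates the 2^n-byte
   (1, 2, 4 and 8 byte) variants. */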
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

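/* The VMCB stores segment attributes in AMD's compressed 12-bit form:
   descriptor flag bits 8-15 map to attrib bits 0-7 and flag bits 20-23
   to attrib bits 8-11.  svm_save_seg()/svm_load_seg() below convert
   between that layout and the flags word of QEMU's SegmentCache. */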
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

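/* In helper_vmrun() below: when V_INTR_MASKING is set in the VMCB's
   int_ctl, the virtual TPR is copied into env->v_tpr (used by the CR8
   helpers) and EFLAGS.IF is latched into HF2_HIF_MASK, which then
   stands in for the host IF while the guest runs. */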
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                cpu_loop_exit();
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
5090
{
5091
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5092
    raise_exception(EXCP06_ILLOP);
5093
}
5094

    
5095
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

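/* VMSAVE stores the same subset of processor state to the VMCB at
   rAX that VMLOAD loads */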
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

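/* STGI sets the global interrupt flag, allowing interrupts and other
   pending events to be taken again */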
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

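/* CLGI clears the global interrupt flag: interrupts and most other
   events are held pending until the next STGI */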
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

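/* SKINIT (secure init and jump) is not implemented: raise #UD after
   the intercept check */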
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

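/* INVLPGA invalidates the TLB entry for the page at rAX in the ASID
   given by ECX; we simply flush the page for every ASID */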
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see whether the flush is really
       needed */
    tlb_flush_page(env, addr);
}

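/* check whether 'type' is intercepted by the active VMCB and, if it
   is, generate a #VMEXIT with 'param' as additional exit information */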
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* the MSR permission map uses two bits per MSR: t1 is the
               byte offset into the map, t0 the bit offset within that
               byte */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param is 0 for rdmsr, 1 for wrmsr: it selects the read
               or the write intercept bit of the MSR */
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

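/* check the I/O permission map for an IN/OUT on 'port'; 'param' uses
   the IOIO exit_info_1 layout, so bits 4..6 give the access size and
   select how many permission bits must be tested */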
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

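/* #VMEXIT: store the guest state back into the VMCB, reload the host
   state from the host save area, record the exit code and return to
   the main cpu loop */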
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
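/* any MMX instruction resets the FPU stack top and marks all eight
   FP registers as valid (fptags: 0 means valid, 1 means empty) */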
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: remove this helper */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

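/* instantiate the vector helpers twice: SHIFT 0 generates the 64 bit
   MMX variants, SHIFT 1 the 128 bit SSE variants */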
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

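/* instantiate the generic arithmetic helpers for each operand size:
   SHIFT 0/1/2/3 selects 8/16/32/64 bit operations */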
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
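/* these helpers must not be called with t0 == 0, or the scan loops
   below would never terminate; the translator checks for a zero
   operand before calling them */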
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

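/* lazy condition code support: cc_table maps each CC_OP value to a
   pair of functions computing all of the flags, or only CF, from the
   saved CC_SRC/CC_DST operands */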
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};