/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

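/* The table below gives the PF contribution for each possible result
   byte: CC_P is set when the byte has an even number of 1 bits, which
   is how x86 defines the parity flag.  As an illustrative sketch
   (hypothetical generator, not part of this file), such a table could
   be produced with:

       for (i = 0; i < 256; i++)
           table[i] = (__builtin_popcount(i) & 1) ? 0 : CC_P;
*/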
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

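/* RCL rotates through CF, so an 8-bit rotate effectively works modulo
   9 and a 16-bit rotate modulo 17; the tables above fold the 5-bit
   shift count accordingly, e.g. rclb_table[12] == 3 because
   12 mod 9 == 3. */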
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

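/* f15rk[] holds the seven constants used by the x87 load-constant
   instructions (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */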
/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

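/* Recap of the architectural descriptor layout decoded by the two
   helpers above: e1 is the low dword (limit[15:0] in bits 0-15,
   base[15:0] in bits 16-31); e2 is the high dword (base[23:16] in
   bits 0-7, type/S/DPL/P in bits 8-15, limit[19:16] in bits 16-19,
   AVL/L/D-B/G in bits 20-23, base[31:24] in bits 24-31). */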
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

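/* Worked example for get_ss_esp_from_tss(): with a 32-bit TSS
   (type 9 or 11, so shift == 1) and dpl == 0, index = (0*4 + 2) << 1
   = 4, which reads ESP0 at TSS offset 4 and SS0 at offset 8,
   matching the architectural 32-bit TSS layout. */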
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload the ones
       that may trigger exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

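/* switch_tss() above follows the architectural task-switch order:
   resolve an optional task gate, validate the new TSS limit, read the
   whole new TSS, save the outgoing state into the old TSS, adjust the
   busy bits and the back link (for CALL), load TR, CR3 and EFLAGS,
   then the general registers, the LDT and finally the segment
   registers, which are the steps that may still fault. */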
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

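/* Worked example for check_io(): for addr = 0x3f8 and size = 1, the
   bitmap word at tr.base + io-map base (read from TSS offset 0x66) +
   (0x3f8 >> 3) = +0x7f is fetched and shifted by (0x3f8 & 7) = 0;
   the access is allowed only if every covered bit is clear. */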
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

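/* On TARGET_X86_64 the 0xffffffff case must be special-cased: a
   32-bit stack-pointer write zero-extends into RSP (as 32-bit
   register writes do in long mode), which the generic
   "ESP = (ESP & ~mask) | (val & mask)" form would not do for the
   high 32 bits. */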
/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

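/* Note on the PUSHW/PUSHL/POPW/POPL macros: they update a local copy
   of the stack pointer; callers such as do_interrupt_protected()
   commit it with SET_ESP() only after every push has succeeded, so a
   fault in the middle of building an interrupt frame leaves the
   guest ESP unchanged. */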
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

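/* Resulting stack frame for an inner-privilege interrupt built above
   (32-bit gate, from low addresses up): [error code], EIP, CS,
   EFLAGS, old ESP, old SS, and in vm86 mode additionally ES, DS, FS,
   GS.  Same-privilege interrupts omit the SS:ESP and segment
   pushes. */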
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

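/* In the 64-bit TSS, RSP0/1/2 live at offsets 4/12/20 and IST1-7 at
   offsets 36-84, so index = 8 * level + 4 covers both: levels 0-2
   select RSPn and, via the "ist + 3" convention used below, levels
   4-10 select ISTn. */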
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}

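/* SYSCALL/SYSRET selector convention (architectural): STAR[47:32] is
   the kernel CS selector used by helper_syscall() above, with kernel
   SS at that value + 8; STAR[63:48] is the base selector used by
   helper_sysret() below, giving user CS at that value (+ 16 in
   64-bit mode) and user SS at + 8. */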
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

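/* Example of the rules above: a contributory fault (e.g. #GP,
   vector 13) raised while delivering another contributory fault or a
   #PF escalates to a double fault (#DF, vector 8, error code 0);
   any fault raised while delivering #DF is a triple fault, which
   aborts the emulated CPU here (real hardware resets instead). */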
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

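/* The SMM state save area written above occupies the top of SMRAM:
   state is stored at smbase + 0x8000 + 0x7xxx offsets, and SMM entry
   is at CS base = smbase with EIP = 0x8000.  Bit 17 of the revision
   ID (0x20000) advertises SMBASE relocation, which helper_rsm()
   honours below. */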
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
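
/* Unsigned 64/32 divide for the 32-bit DIV instruction: the dividend
   is EDX:EAX, the divisor is T0.  #DE is raised both for a zero
   divisor and for a quotient that overflows 32 bits.  Example: with
   EDX=1, EAX=0 (dividend 2^32) and T0=2 the result fits, giving
   EAX=0x80000000, EDX=0; with T0=1 the quotient 0x100000000 does not
   fit in EAX and EXCP00_DIVZ is raised instead. */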
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
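
/* CMPXCHG8B m64: compare EDX:EAX with the quadword at A0.  If they are
   equal, ZF is set and ECX:EBX is stored to memory; otherwise ZF is
   cleared and the memory value is loaded into EDX:EAX.  Only ZF is
   affected, so the current flags are computed first and written back
   through CC_SRC. */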
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000; /* set the BS (single-step) bit in DR6 */
    raise_exception(EXCP01_SSTP);
}
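
/* CPUID: EAX selects the leaf.  Out-of-range basic (< 0x80000000) and
   extended (>= 0x80000000) leaves are clamped before the switch; real
   CPUs document returning the highest supported basic leaf in that
   case, which is what the fallback to cpuid_level approximates. */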
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
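
/* ENTER with non-zero nesting level: copy level-1 frame pointers from
   the old frame (addressed through EBP) below the just-pushed saved
   EBP, then push T1, which the caller has apparently set to the new
   frame pointer value.  E.g. "enter 16, 3" in 32-bit code copies two
   doublewords and then pushes T1 as the third. */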
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
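
/* LLDT: load the LDT register from the selector in T0.  The descriptor
   must come from the GDT (bit 2 clear) and be a system descriptor of
   type 2 (LDT).  In long mode system descriptors are 16 bytes long,
   hence entry_limit 15 and the extra dword holding base bits 63..32. */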
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
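
/* Selector layout relied on below: bits 1..0 are the RPL, bit 2 (0x4)
   selects the LDT instead of the GDT, and bits 15..3 index the table,
   so "selector & ~7" is the byte offset of the descriptor and
   "(selector & 0xfffc) == 0" detects a null selector. */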
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
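
/* Far JMP in protected mode, with T0:T1 holding the new CS:EIP.  A
   direct jump to a code segment applies the conforming/non-conforming
   privilege rules and keeps CPL unchanged; otherwise the descriptor
   must be a TSS, task gate or call gate, and for call gates the real
   target CS:EIP comes from the gate descriptor itself. */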
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
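
/* Far CALL in protected mode; shift encodes the operand size (0=16,
   1=32, 2=64 bit).  A call through a call gate into a more privileged
   non-conforming segment switches to the inner stack taken from the
   TSS and copies param_count parameters (the gate's dword count,
   bits 4..0 of e2) from the old stack to the new one. */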
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
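
/* SYSENTER/SYSEXIT derive flat segments from the SYSENTER_CS MSR:
   CS = sysenter_cs and SS = sysenter_cs + 8 on kernel entry,
   CS = sysenter_cs + 16 and SS = sysenter_cs + 24 (both RPL 3) on the
   return to user mode.  ESP/EIP come from the SYSENTER_ESP/EIP MSRs
   on entry and from ECX/EDX on exit. */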
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
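
/* WRMSR/RDMSR: ECX selects the MSR and EDX:EAX carries the 64-bit
   value.  Both are no-ops in user-mode emulation.  For MSR_EFER only
   bits backed by a reported CPUID feature (SCE, LME, FFXSR, NXE) are
   writable; unknown MSRs are silently ignored on write and read back
   as 0 (see the XXX markers below). */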
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
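
/* LSL/LAR/VERR/VERW share one pattern: on success ZF is set (and for
   LSL/LAR the result is returned in T1); on any failed descriptor or
   privilege check only ZF is cleared and the destination register is
   left unmodified.  ZF travels through CC_SRC, with the other flags
   recomputed first so they are preserved. */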
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
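
/* x87 packed BCD format used by FBLD/FBSTP below: bytes 0..8 hold 18
   BCD digits, two per byte with the low-order digit in the low nibble,
   and bit 7 of byte 9 is the sign.  E.g. -123 is stored as 0x23, 0x01,
   seven zero bytes, then a 0x80 sign byte. */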
/* BCD ops */

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
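
/* FPREM1/FPREM compute the partial remainder of ST0 / ST1.  When the
   exponent difference is below 53 the reduction completes: C2 is
   cleared and the three low bits of the quotient are reported in
   C0/C3/C1.  Otherwise only a partial reduction is performed and C2 is
   set, telling the caller to run the instruction again.  FPREM1 rounds
   the quotient to nearest (IEEE remainder), FPREM chops towards zero. */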
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
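
/* FPREM: legacy partial remainder with a truncated quotient; same
   status word reporting as FPREM1 above. */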
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
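
/* FSTENV: rebuild the tag word with the usual 2-bit encoding per
   register: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
   denormal), 11 = empty. */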
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
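
/* FXSAVE layout: FCW at offset 0, FSW at 2, the abridged 8-bit tag
   word at 4 (one bit per register, 1 = valid, hence the ^ 0xff below),
   the x87 registers in 16-byte slots from offset 0x20 and the XMM
   registers from offset 0xa0. */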
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
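
/* Conversion between the guest-visible 80-bit floating point format
   and the host representation of CPU86_LDouble.  Without
   USE_X86LDOUBLE the host type is a 64-bit double, so the conversion
   is lossy. */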
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
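
/* 128/64 unsigned division by shift-and-subtract: one quotient bit is
   produced per iteration; the quotient ends up in *plow and the
   remainder in *phigh. */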
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
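
/* Signed 128/64 division: divide the magnitudes with div64() and fix
   up the signs afterwards, rejecting quotients that do not fit in
   64 bits. */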
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}
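
/* RSQRTSS/RCPSS approximations.  Real hardware only guarantees around
   12 bits of precision here; computing the exact result on the host is
   a conservative approximation. */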
float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}
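
/* Propagate the x87 control word into the softfloat status: the RC
   field selects the rounding mode and, with FLOATX80, the PC field
   selects the rounding precision (00 = single, 10 = double,
   11 = extended; 01 is reserved). */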
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}
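
/* VMRUN: save the host state into the hsave area, cache the intercept
   bitmaps in env, load the guest state from the VMCB and enter the
   guest, injecting a pending event if one is queued. */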
extern uint8_t *phys_ram_base;
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}
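
/* Check whether the running guest intercepts the given event type; if
   so, perform a #VMEXIT with the type as exit code and return 1. */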
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            if (ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}
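
/* #VMEXIT: write the guest state back into the VMCB, reload the host
   state from the hsave area and report exit_code/exit_info_1 to the
   host. */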
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif