/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
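
/* Note: parity_table[i] is CC_P exactly when byte value i has an even
   number of set bits, matching the x86 definition of PF (computed on the
   low 8 bits of a result only).  For example, 0x35 = 00110101b has four
   set bits, so parity_table[0x35] == CC_P, while 0x07 has three set bits,
   so parity_table[0x07] == 0. */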

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
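
/* Note: RCL/RCR rotate through CF, so an N-bit operand plus the carry bit
   forms an (N+1)-bit ring.  The 5-bit rotate count (0..31) is therefore
   reduced modulo 9 for byte operands and modulo 17 for word operands;
   e.g. a 16-bit RCL by 20 behaves like RCL by rclw_table[20] == 3. */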

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
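
/* Note: these are the constants loaded by the seven x87 load-constant
   instructions (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T), kept
   in one table so the FPU helpers can index it directly. */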

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
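
/* Note: e1/e2 are the two 32-bit words of a segment descriptor.  e1 holds
   limit[15:0] and base[15:0]; e2 holds base[23:16], the type/flag bits,
   limit[19:16] and base[31:24].  With DESC_G_MASK set, the 20-bit limit is
   in 4 KiB units, so e.g. a raw limit of 0xfffff expands to
   (0xfffff << 12) | 0xfff = 0xffffffff. */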

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
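
/* Note: `shift` distinguishes a 16-bit TSS (type 1/3, shift 0) from a
   32-bit TSS (type 9/11, shift 1).  The privilege-level stacks start right
   after the back link, so index = (dpl * 4 + 2) << shift lands on SP0/SS0
   at offset 2 in a 16-bit TSS and on ESP0/SS0 at offset 4 in a 32-bit one,
   with one (SP, SS) pair per privilege level 0..2. */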

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* non-readable code segments are not allowed */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
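
/* Note: the switch source matters because of the TSS busy bit and the NT
   flag, as implemented in switch_tss() below: JMP and IRET clear the busy
   bit of the outgoing TSS, CALL leaves it set and additionally stores a
   back link and sets NT in the new task's EFLAGS, and JMP and CALL set the
   busy bit of the incoming TSS. */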

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if it is a task gate, read the real TSS descriptor and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        /* the 286 TSS packs ES/CS/SS/DS at a 2-byte stride from 0x22 */
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS; the old TSS format
       (old_type) determines the save layout */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the checked bits may straddle a byte boundary, so two bytes
       are read */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
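
/* Worked example: for an `outw` to port 0x3f9 the bitmap byte offset is
   0x3f9 >> 3 = 0x7f past the I/O bitmap base (read from offset 0x66 of the
   TSS), the bit offset is 0x3f9 & 7 = 1, and size = 2, so the access is
   allowed only if bits 1 and 2 of that bitmap byte are both clear.  This
   is the standard I/O permission bitmap scheme of the 32-bit TSS. */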

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
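
/* Note: on a 64-bit build ESP aliases the low half of RSP.  Writes to
   32-bit registers zero-extend into the 64-bit register on x86-64, so the
   32-bit case must store a zero-extended value, while the 16-bit case must
   preserve the upper bits; the simple masking form used for 32-bit builds
   would not give that zero-extension behaviour. */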

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
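
/* Note: these macros update the caller's local stack-pointer variable,
   not ESP itself; callers commit the final value with SET_ESP once all
   pushes have succeeded, so a fault part-way through a push sequence
   leaves the architectural ESP unchanged. */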

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
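
/* Note: in the 64-bit TSS the three privilege-level stack pointers
   RSP0..RSP2 live at offsets 4, 12 and 20, and the seven IST slots follow
   at offsets 36..84; callers pass level 0..2 for RSPn and ist + 3 for
   ISTn, which is why a single 8 * level + 4 indexing expression covers
   both. */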

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
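
/* Note: the selector arithmetic mirrors the STAR MSR layout.  STAR[47:32]
   is the SYSCALL CS selector (SS is implicitly CS + 8) and STAR[63:48] is
   the SYSRET selector base used in helper_sysret() below.  In long mode
   the return RIP is saved to RCX, the saved RFLAGS to R11, and the flag
   bits listed in the FMASK MSR are cleared on entry. */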

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
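
/* Note: the "contributory" exceptions are #DE (0), #TS (10), #NP (11),
   #SS (12) and #GP (13).  Two contributory exceptions in a row, or a page
   fault followed by a contributory exception or another page fault,
   escalate to a double fault (#DF, vector 8) with error code 0; a further
   fault while #DF is pending is a triple fault, which real hardware
   answers with a shutdown and which is modeled here as cpu_abort(). */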

/*
 * Signal an interrupt. It is handled in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
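
/* Note on the SMM code below: the CPU state is saved in the save-state
   area at smbase + 0x8000 + 0x7xxx, i.e. the upper part of the 64 KiB
   SMRAM window, and the saved "revision ID" advertises SMBASE relocation
   via bit 17, which is why helper_rsm() only reloads env->smbase when
   (val & 0x20000) is set. */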

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */
1593

    
1594

    
1595
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

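/* unsigned 64/32 divide: EDX:EAX / t0 -> quotient in EAX, remainder in
   EDX; raises #DE on division by zero or quotient overflow */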
void helper_divl_EAX_T0(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

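/* signed variant: raises #DE if the quotient does not fit in 32 bits */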
void helper_idivl_EAX_T0(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

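/* CMPXCHG8B: compare EDX:EAX with the 64 bit value at A0; on match
   store ECX:EBX there and set ZF, otherwise load the value into
   EDX:EAX and clear ZF */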
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

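/* CPUID: fill EAX/EBX/ECX/EDX for the leaf selected by EAX; out of
   range leaves fall back to the highest supported basic leaf */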
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

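/* ENTER with a non zero nesting level: copy the enclosing frame
   pointers from the old frame, then push the new frame pointer (T1) */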
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

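/* load the LDT register from the selector in T0; the descriptor must
   come from the GDT and is 16 bytes wide in long mode */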
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

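/* load the task register from the selector in T0 and mark the TSS
   descriptor busy */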
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

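/* on return to an outer privilege level, invalidate data segment
   registers that are not accessible at the new CPL */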
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

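/* protected mode IRET; with NT set this is a return from a nested
   task */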
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

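/* SYSENTER: fast system call entry to CPL 0 through the flat
   segments derived from MSR_IA32_SYSENTER_CS */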
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

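/* SYSEXIT: fast return to CPL 3; the return ESP and EIP are taken
   from ECX and EDX */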
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

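/* write T0 to control register 'reg' */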
void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        env->cr[8] = T0;
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

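/* RDTSC: faults with #GP if CR4.TSD is set and CPL != 0 */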
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
        /* currently unimplemented */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

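/* LSL: load the segment limit into T1 and set ZF on success */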
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

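/* LAR: load the access rights bytes into T1 and set ZF on success */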
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

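/* VERR: set ZF if the segment is readable at the current privilege
   level */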
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

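/* VERW: set ZF if the segment is writable at the current privilege
   level */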
void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

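/* FBLD: load an 18 digit packed BCD integer from A0 onto the FPU
   stack */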
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

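/* FBST: store ST0 to A0 as an 18 digit packed BCD integer with a
   sign byte */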
void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

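/* FPREM1: IEEE partial remainder (round to nearest quotient); sets
   C2 when the reduction is incomplete */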
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
3263
{
3264
    CPU86_LDouble dblq, fpsrcop, fptemp;
3265
    CPU86_LDoubleU fpsrcop1, fptemp1;
3266
    int expdif;
3267
    signed long long int q;
3268

    
3269
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3270
       ST0 = 0.0 / 0.0; /* NaN */
3271
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3272
       return;
3273
    }
3274

    
3275
    fpsrcop = (CPU86_LDouble)ST0;
3276
    fptemp = (CPU86_LDouble)ST1;
3277
    fpsrcop1.d = fpsrcop;
3278
    fptemp1.d = fptemp;
3279
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3280

    
3281
    if (expdif < 0) {
3282
        /* optimisation? taken from the AMD docs */
3283
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3284
        /* ST0 is unchanged */
3285
        return;
3286
    }
3287

    
3288
    if ( expdif < 53 ) {
3289
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3290
        /* round dblq towards zero */
3291
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3292
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3293

    
3294
        /* convert dblq to q by truncating towards zero */
3295
        if (dblq < 0.0)
3296
           q = (signed long long int)(-dblq);
3297
        else
3298
           q = (signed long long int)dblq;
3299

    
3300
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3301
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
3302
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
3303
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3304
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
3305
    } else {
3306
        int N = 32 + (expdif % 32); /* as per AMD docs */
3307
        env->fpus |= 0x400;  /* C2 <-- 1 */
3308
        fptemp = pow(2.0, (double)(expdif - N));
3309
        fpsrcop = (ST0 / ST1) / fptemp;
3310
        /* fpsrcop = integer obtained by chopping */
3311
        fpsrcop = (fpsrcop < 0.0) ?
3312
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3313
        ST0 -= (ST1 * fpsrcop * fptemp);
3314
    }
3315
}
3316

    
3317
void helper_fyl2xp1(void)
3318
{
3319
    CPU86_LDouble fptemp;
3320

    
3321
    fptemp = ST0;
3322
    if ((fptemp+1.0)>0.0) {
3323
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3324
        ST1 *= fptemp;
3325
        fpop();
3326
    } else {
3327
        env->fpus &= (~0x4700);
3328
        env->fpus |= 0x400;
3329
    }
3330
}
3331

    
3332
void helper_fsqrt(void)
3333
{
3334
    CPU86_LDouble fptemp;
3335

    
3336
    fptemp = ST0;
3337
    if (fptemp<0.0) {
3338
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
3339
        env->fpus |= 0x400;
3340
    }
3341
    ST0 = sqrt(fptemp);
3342
}
3343

    
3344
void helper_fsincos(void)
3345
{
3346
    CPU86_LDouble fptemp;
3347

    
3348
    fptemp = ST0;
3349
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3350
        env->fpus |= 0x400;
3351
    } else {
3352
        ST0 = sin(fptemp);
3353
        fpush();
3354
        ST0 = cos(fptemp);
3355
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3356
        /* the above code is for  |arg| < 2**63 only */
3357
    }
3358
}
3359

    
3360
void helper_frndint(void)
3361
{
3362
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
3363
}
3364

    
3365
void helper_fscale(void)
3366
{
3367
    ST0 = ldexp (ST0, (int)(ST1));
3368
}
3369

    
3370
void helper_fsin(void)
3371
{
3372
    CPU86_LDouble fptemp;
3373

    
3374
    fptemp = ST0;
3375
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3376
        env->fpus |= 0x400;
3377
    } else {
3378
        ST0 = sin(fptemp);
3379
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3380
        /* the above code is for  |arg| < 2**53 only */
3381
    }
3382
}
3383

    
3384
void helper_fcos(void)
3385
{
3386
    CPU86_LDouble fptemp;
3387

    
3388
    fptemp = ST0;
3389
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3390
        env->fpus |= 0x400;
3391
    } else {
3392
        ST0 = cos(fptemp);
3393
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3394
        /* the above code is for  |arg5 < 2**63 only */
3395
    }
3396
}
3397

    
3398
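/* FXAM classification: C3/C2/C0 encode NaN, normal, infinity, zero or
   denormal as set below, and C1 receives the sign of ST0. */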
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

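/* FSTENV image: 28 bytes in 32-bit mode (FCW, FSW, FTW and the four
   instruction/operand pointer slots), 14 bytes in 16-bit mode.  The
   instruction and operand pointers are not tracked and stored as 0. */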
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

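/* FXSAVE area layout used here: FCW at +0, FSW at +2, the abridged
   (one bit per register) tag byte at +4, MXCSR at +0x18, the eight
   FP/MMX registers from +0x20 at a 16-byte stride, and the XMM
   registers from +0xa0. */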
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

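/* 128/64 -> 64 unsigned division by restoring shift-subtract: one
   quotient bit per iteration over 64 iterations.  Overflow (quotient
   does not fit in 64 bits) is detected up front when *phigh >= b. */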
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

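/* Note: the real RSQRTSS/RCPSS instructions only guarantee roughly
   12 bits of relative precision; returning the full-precision result
   here is presumably a deliberate simplification. */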
float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

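/* Propagate the FPU control word into the softfloat status: RC_MASK
   selects the rounding mode, and bits 9-8 select the rounding
   precision (32/64/80 bits) when FLOATX80 is available. */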
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

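/* The VMCB stores segment attributes in a compressed 12-bit encoding;
   vmcb2cpu_attrib() rebuilds the descriptor high word (type, DPL, P,
   AVL, L, D/B, G plus base/limit fragments) and cpu2vmcb_attrib()
   compresses it back. */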
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

extern uint8_t *phys_ram_base;
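/* VMRUN: save the host state to the hsave page, load the guest state
   and intercept bitmaps from the VMCB at 'addr', optionally inject a
   pending event, and enter the guest via cpu_loop_exit(). */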
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

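/* VMLOAD/VMSAVE transfer the state not handled by VMRUN/#VMEXIT:
   FS, GS, TR and LDTR plus the KernelGSBase, STAR/LSTAR/CSTAR/SFMASK
   and SYSENTER MSRs. */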
void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

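/* Check the intercept bitmaps loaded at VMRUN time and emulate a
   #VMEXIT when the access is intercepted.  The MSR permission bitmap
   holds two bits per MSR, in three 2K blocks covering the ranges
   0..0x1fff, 0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff. */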
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
            if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

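/* #VMEXIT: write the guest state and exit codes back to the VMCB,
   reload the host state from the hsave page, and return to the host
   with GIF cleared. */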
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif