target-i386/helper.c @ 57fec1fe

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
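/* The x86 parity flag (PF) is set when the low byte of a result has an
   even number of 1 bits. parity_table[] precomputes this for every byte
   value so the condition-code helpers can evaluate PF with a single
   lookup, e.g. (illustrative use): eflags |= parity_table[(uint8_t)CC_DST]; */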

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
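/* RCL rotates through CF, so an N-bit rotate has period N+1: 17 steps
   for 16-bit operands, 9 for 8-bit ones. The shift count arrives
   already masked to 5 bits, and these tables reduce that 0..31 count
   modulo 17 (rclw) or modulo 9 (rclb); e.g. rclb_table[12] == 3. */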

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
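/* These correspond, in array order (not opcode order), to the x87
   constant-load instructions FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2,
   FLDL2E and FLDL2T; the FPU ops presumably index f15rk[] to
   implement them. */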

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
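/* Descriptor layout reminder: e1 is the low dword of the 8-byte
   descriptor, e2 the high dword. The 32-bit base is scattered as
   e1[31:16] | e2[7:0]<<16 | e2[31:24], and the 20-bit limit as
   e1[15:0] | e2[19:16]; with the G bit set the limit is in 4K pages,
   hence the (limit << 12) | 0xfff scaling above. */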

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

    
195
/* XXX: merge with load_seg() */
196
static void tss_load_seg(int seg_reg, int selector)
197
{
198
    uint32_t e1, e2;
199
    int rpl, dpl, cpl;
200

    
201
    if ((selector & 0xfffc) != 0) {
202
        if (load_segment(&e1, &e2, selector) != 0)
203
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
204
        if (!(e2 & DESC_S_MASK))
205
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
206
        rpl = selector & 3;
207
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
208
        cpl = env->hflags & HF_CPL_MASK;
209
        if (seg_reg == R_CS) {
210
            if (!(e2 & DESC_CS_MASK))
211
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
212
            /* XXX: is it correct ? */
213
            if (dpl != rpl)
214
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
215
            if ((e2 & DESC_C_MASK) && dpl > rpl)
216
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
217
        } else if (seg_reg == R_SS) {
218
            /* SS must be writable data */
219
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
220
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221
            if (dpl != cpl || dpl != rpl)
222
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
223
        } else {
224
            /* not readable code */
225
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
226
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227
            /* if data or non conforming code, checks the rights */
228
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
229
                if (dpl < cpl || dpl < rpl)
230
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
231
            }
232
        }
233
        if (!(e2 & DESC_P_MASK))
234
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
235
        cpu_x86_load_seg_cache(env, seg_reg, selector,
236
                       get_seg_base(e1, e2),
237
                       get_seg_limit(e1, e2),
238
                       e2);
239
    } else {
240
        if (seg_reg == R_SS || seg_reg == R_CS)
241
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242
    }
243
}
244

    
245
#define SWITCH_TSS_JMP  0
246
#define SWITCH_TSS_IRET 1
247
#define SWITCH_TSS_CALL 2
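/* TSS descriptor type encoding: bit 3 of the 4-bit type distinguishes
   a 32-bit (1) from a 16-bit (0) TSS, and bit 1 is the busy flag, so
   (type & 7) == 1 accepts only the two available (non-busy) variants
   and shift = type >> 3 picks the word size used in switch_tss(). */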

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
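/* The I/O permission bitmap holds one bit per port (1 = deny); its
   offset inside the TSS is the 16-bit word at TSS+0x66. Reading a
   16-bit word and shifting by (addr & 7) lets a multi-byte access
   (size up to 4) straddle a byte boundary without a second load. */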

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
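/* SET_ESP writes only the stack-size-appropriate part of ESP: a 16-bit
   stack (B bit clear) must preserve the upper bits of ESP. On x86_64
   the mask can also be all-ones 64-bit, in which case the full RSP is
   written; a 32-bit stack write zero-extends, matching hardware. */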

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
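/* These macros keep the unmasked sp as a plain integer and apply
   sp_mask only when forming the address, so a 16-bit stack wraps
   within the segment (ssp is the segment base) while the caller can
   still commit the final value with SET_ESP(sp, sp_mask). */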

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
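/* 64-bit TSS layout: RSP0..RSP2 are the qwords at offsets 4, 12 and
   20, and IST1..IST7 follow at offset 0x24. 8*level + 4 covers both:
   levels 0-2 select RSPn, and callers pass ist + 3 so that e.g. ist=1
   gives level 4 and lands on IST1 at 0x24. */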

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
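/* SYSCALL/SYSRET selector plumbing, as implemented above: STAR[47:32]
   supplies the kernel CS on SYSCALL (SS is that selector + 8), and
   STAR[63:48] the user CS base for SYSRET. In long mode the target RIP
   comes from LSTAR (64-bit code) or CSTAR (compatibility mode) and the
   RFLAGS bits selected by SFMASK (env->fmask) are cleared; legacy mode
   jumps to STAR[31:0]. */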

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
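/* In real mode the IDT is the classic interrupt vector table: 4-byte
   entries, offset word first then segment word, so vector N lives at
   IVT base + 4*N and the handler is selector:offset with the segment
   base computed as selector << 4. */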

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
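/* Double-fault rule implemented above (vector 0 = #DE and vectors
   10-13 = #TS/#NP/#SS/#GP are "contributory", 14 = #PF):
     contributory + contributory -> #DF
     #PF          + contributory -> #DF
     #PF          + #PF          -> #DF
     #DF          + anything     -> triple fault (cpu_abort)
   Benign combinations simply deliver the second exception. */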

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
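/* The SMM revision ID is stored in the state save area; bit 17
   (0x20000) advertises SMBASE relocation support, which is why
   helper_rsm() below only reloads env->smbase when that bit is set in
   the saved revision word. */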

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1591

    
1592
#endif /* !CONFIG_USER_ONLY */
1593

    
1594

    
1595
#ifdef BUGGY_GCC_DIV64
1596
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1597
   call it from another function */
1598
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1599
{
1600
    *q_ptr = num / den;
1601
    return num % den;
1602
}
1603

    
1604
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1605
{
1606
    *q_ptr = num / den;
1607
    return num % den;
1608
}
1609
#endif
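
/* 64-by-32 division helpers: the dividend is EDX:EAX.  As on real
   hardware, both a zero divisor and a quotient that does not fit in
   32 bits raise the divide-error exception (#DE, vector 0). */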
void helper_divl_EAX_T0(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
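
/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; on a match
   store ECX:EBX there and set ZF, otherwise load the memory value into
   EDX:EAX and clear ZF.  The access is not made atomic here. */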
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step()
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}
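
/* CPUID.  Out-of-range leaves are clamped before the switch: a basic
   leaf above cpuid_level, or an extended leaf above cpuid_xlevel, falls
   back to the data of the highest supported basic leaf, mimicking what
   real CPUs do for too-large leaf numbers. */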
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
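
/* Helper for ENTER with a non-zero nesting level: copies level-1 frame
   pointers from the enclosing frame onto the new stack and then pushes
   the new frame pointer passed in T1; the initial esp adjustment
   presumably skips the slot of the EBP value already pushed by the main
   ENTER sequence. */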
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
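
/* LLDT and LTR load the LDT register and the task register from a GDT
   descriptor.  In long mode these system descriptors are 16 bytes
   instead of 8 (hence an entry_limit of 15 rather than 7), the third
   dword supplying bits 63:32 of the base. */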
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
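
/* Protected mode far call.  Besides a direct call to a code segment,
   this handles calls through task gates/TSS descriptors and call gates;
   for a call gate leading to a more privileged segment, the stack is
   switched to the one the TSS supplies for the target privilege level
   and param_count words (or dwords) are copied from the old stack. */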
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
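
/* SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS MSR:
   CS = cs and SS = cs + 8 on entry, CS = cs + 16 and SS = cs + 24 with
   RPL 3 on exit; EIP and ESP come from the SYSENTER_EIP/ESP MSRs on
   entry and from EDX/ECX on exit. */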
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
        /* currently unimplemented */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
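
/* LSL/LAR and VERR/VERW never fault on a bad selector: they only report
   success or failure through ZF, so the failure paths below update
   CC_SRC instead of raising an exception. */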
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
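
/* FPREM1/FPREM: partial remainder with the quotient's low three bits
   reported in C0, C3 and C1.  FPREM1 rounds the quotient to nearest
   (IEEE remainder), FPREM truncates it.  When the exponent difference is
   too large only a partial reduction is done and C2 is set so the
   instruction can be retried; the cutoff of 53 apparently corresponds to
   the mantissa width of a double-precision value. */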
3206

    
3207
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

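/* Note: helper_fprem1 reduces with a round-to-nearest quotient (IEEE
   remainder) while helper_fprem truncates the quotient towards zero.
   Both report the low three quotient bits in C0/C3/C1 and set C2 when
   the exponent difference is too large for one step, telling the guest
   to re-execute the instruction on the partial remainder. */
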
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

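/* FXAM class encoding in the condition codes, as set above
   (C0 = 0x100, C1 = 0x200, C2 = 0x400, C3 = 0x4000):
     NaN      -> C0            (0x100)
     normal   -> C2            (0x400)
     infinity -> C2|C0         (0x500)
     zero     -> C3            (0x4000)
     denormal -> C3|C2         (0x4400)
   C1 carries the sign of ST0 in every case. */
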
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

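/* The tag word built above uses the architectural two-bit encoding per
   register: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
   denormal), 11 = empty.  helper_fldenv below only needs to recover
   the empty/non-empty distinction, so it just compares against 3. */
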
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

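/* FXSAVE stores an abridged tag word: one bit per register, 1 = valid,
   0 = empty, which is why fptag is xor'ed with 0xff in both directions
   (env->fptags[] uses 1 = empty).  The full two-bit encoding is
   rebuilt from the register contents by helper_fstenv above when a
   guest asks for it. */
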
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

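/* In the !USE_X86LDOUBLE branch above, FP registers are kept as host
   doubles, so the 80-bit conversion rebiases the exponent between
   16383 and EXPBIAS (1023), shifts the mantissa between 63 and 52
   fraction bits, and adds or drops the x87 explicit integer bit
   (1LL << 63); the low 11 mantissa bits are simply lost. */
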
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

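/* div64 divides the 128-bit value *phigh:*plow by b using 64 steps of
   shift-and-subtract long division, leaving the quotient in *plow and
   the remainder in *phigh; a1 >= b on entry means the quotient cannot
   fit in 64 bits, which is the DIV #DE overflow case.  E.g. with
   *phigh = 0, *plow = 100, b = 7 it returns *plow = 14, *phigh = 2. */
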
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

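/* idiv64 reduces signed division to div64 on magnitudes.  The overflow
   bounds differ by one because the int64_t range is asymmetric: a
   negative quotient may go as low as -2^63 (*plow up to 1ULL << 63
   before negation) while a positive one must stay below 2^63.  The
   remainder takes the sign of the dividend, hence the final *phigh
   negation. */
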
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

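/* For the signed multiplies, CF/OF must be set when the product does
   not fit in 64 bits.  The test above exploits the fact that a 128-bit
   product fits iff its high half r1 equals the sign extension of its
   low half, i.e. (int64_t)r0 >> 63 (all zeroes or all ones); CC_SRC
   then feeds the lazy condition-code computation. */
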
void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

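/* The two switches above decode the x87 control word: the RC field
   (bits 11:10) selects nearest-even/down/up/chop rounding, and the PC
   field (bits 9:8) selects the softfloat working precision - 00 =
   single (32), 10 = double (64), 11 = extended (80); the reserved 01
   encoding falls through to extended here. */
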
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

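/* The VMCB stores segment attributes in a compressed 12-bit form (the
   two descriptor attribute bytes with the base/limit bits squeezed
   out), while the CPU state keeps the raw descriptor layout; the two
   helpers above repack between the formats, which is why the
   vmcb->cpu direction also merges base and limit bits back in. */
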
extern uint8_t *phys_ram_base;
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
            if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

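/* Per the AMD SVM spec, the IOPM consulted above holds one intercept
   bit per I/O port ((param >> 4) & 7 is the access size in bytes, so
   multi-byte accesses test that many consecutive bits), and the MSRPM
   holds two bits per MSR in three 2K-byte regions - read intercept
   first, then write - which is why param (0 = read, 1 = write) selects
   the bit within the computed pair. */
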
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif