/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
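
/* Each parity_table entry is CC_P iff its index has an even number
   of set bits: x86 PF is even parity of the low result byte.  For
   example, index 3 (00000011b) has two set bits, so entry 3 is CC_P.
   A hedged sketch of how such a table could be regenerated offline
   (not part of the original file; assumes a GCC-style
   __builtin_popcount):

   for (i = 0; i < 256; i++)
       table[i] = (__builtin_popcount(i) & 1) ? 0 : CC_P;
*/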

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
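
/* RCL rotates through CF, so a 16-bit RCL is effectively a 17-bit
   rotate and an 8-bit RCL a 9-bit one; the rotate count (already
   masked to 5 bits) is therefore reduced modulo 17 or 9 via these
   tables: rclw_table[i] == i % 17 and rclb_table[i] == i % 9. */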

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
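
/* f15rk holds the constants loaded by the x87 FLDZ, FLD1, FLDPI,
   FLDLG2, FLDLN2, FLDL2E and FLDL2T instructions, in that order
   (lg2 = log10(2), l2e = log2(e), l2t = log2(10)). */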

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
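
/* e1/e2 are the low and high 32-bit words of an x86 segment
   descriptor: e1 holds limit[15:0] and base[15:0]; e2 holds
   base[23:16], the type/DPL/P flags, limit[19:16], the G/B/L/AVL
   bits and base[31:24].  When G is set the 20-bit limit is counted
   in 4 KiB pages, hence the << 12 | 0xfff scaling in
   get_seg_limit(). */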

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
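
/* In vm86 (and real) mode a segment's base is simply its selector
   shifted left by 4, e.g. selector 0xb800 yields base 0xb8000, and
   the limit is fixed at 64 KiB - 1. */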

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
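
/* In a 32-bit TSS (shift == 1) the ring-N stack pointers live at
   offset 4 + 8 * N: ESP0 at 0x04 with SS0 at 0x08, ESP1 at 0x0c,
   ESP2 at 0x14; (dpl * 4 + 2) << 1 reproduces exactly those
   offsets.  A 16-bit TSS (shift == 0) packs each SP/SS pair into 4
   bytes starting at offset 2. */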

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
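
/* Layout of the 32-bit TSS fields read and written by switch_tss()
   below: CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the eight GPRs
   from 0x28, the six segment selectors from 0x48, the LDT selector
   at 0x60 and the T/trap word at 0x64.  The 16-bit TSS uses IP at
   0x0e, FLAGS at 0x10, GPRs from 0x12, selectors from 0x22 and the
   LDT selector at 0x2a, matching the offsets in the code. */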

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been
       done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task's
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       a possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger
           exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is within the CS segment limit */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if port I/O is allowed by the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
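
/* Worked example: the I/O bitmap base is the 16-bit word at TSS
   offset 0x66.  For a 2-byte access to port 0x3f9, the byte holding
   the first permission bit is at io_offset + (0x3f9 >> 3) =
   io_offset + 0x7f; a 16-bit word is read and shifted right by
   0x3f9 & 7 = 1, and bits 0-1 of the result must both be clear for
   the access to be allowed. */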

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
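
/* The stack-segment B bit selects between a 16-bit SP and a 32-bit
   ESP, so every stack update below is masked: with sp_mask ==
   0xffff only the low word of ESP changes and the high word is
   preserved, exactly as a 16-bit PUSH behaves on hardware. */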

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
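
/* Reminder of the IDT gate descriptor layout decoded below: e1
   holds offset[15:0] and the target selector in bits 31:16; e2
   holds offset[31:16] in its high word plus the P bit, DPL and the
   gate type (5 = task gate, 6/7 = 286 interrupt/trap gate, 14/15 =
   386 interrupt/trap gate).  Bit 3 of the type selects 16- vs
   32-bit operation and bit 0 trap vs interrupt semantics. */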

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
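
/* In the 64-bit TSS, RSP0/RSP1/RSP2 live at offsets 4, 12 and 20
   and IST1-IST7 at offsets 36-84, all 8-byte fields; index =
   8 * level + 4 covers both, with the callers passing dpl (0-2)
   for the RSPn entries and ist + 3 (4-10) for the IST entries. */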

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
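
/* STAR MSR layout as used above and in helper_sysret(): bits 47:32
   hold the SYSCALL CS selector base (SS is that value + 8), bits
   63:48 the SYSRET selector base, and bits 31:0 the legacy-mode
   SYSCALL EIP.  In long mode the entry RIP instead comes from LSTAR
   (64-bit callers) or CSTAR (compatibility mode), and SFMASK
   (env->fmask) selects which RFLAGS bits are cleared on entry. */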

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
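
/* Real-mode vectors go through the IVT at the IDT base: each entry
   is 4 bytes, offset then segment.  E.g. for INT 0x10 the entry is
   read at base + 0x40, and FLAGS, CS and IP are pushed as three
   16-bit words before jumping to selector:offset. */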

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than exit
       the emulation with the suitable exception and error code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
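
/* The classification above follows the x86 double-fault rules:
   vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are contributory.
   Contributory followed by contributory, or a page fault followed
   by a contributory exception or another page fault, escalates to
   #DF (vector 8); a further fault while #DF is pending shuts the
   CPU down, modeled here as a triple-fault abort. */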

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
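
/* Note on the revision ID: helper_rsm() below only honors a
   relocated SMBASE when bit 17 (0x20000) of the saved revision
   identifier is set; on hardware that bit advertises SMBASE
   relocation support, while the low bits encode the save-state
   format revision. */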

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1467

    
1468
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

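/* 32-bit divide of EDX:EAX by T0: quotient to EAX, remainder to EDX.
   A zero divisor, or a quotient that does not fit in 32 bits, raises
   #DE, as on real hardware. */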
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

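/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; on match
   store ECX:EBX there and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. */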
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

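/* CPUID: out-of-range leaves are clamped to the highest supported
   basic leaf before dispatching. */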
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

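/* Helper for ENTER with a non-zero nesting level: copy level - 1
   frame pointers from the enclosing frame, then push T1 (presumably
   the new frame pointer set up by the translated code). */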
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

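/* LLDT: load the LDT register from a selector referencing the GDT.
   A null selector just invalidates the LDT; in long mode the
   descriptor is 16 bytes, so the third word supplies base bits
   63..32. */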
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

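/* LTR: load the task register.  The descriptor must be an available
   TSS; it is marked busy once loaded. */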
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

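/* When returning to a less privileged level, data segment registers
   that are not accessible at the new CPL must be invalidated. */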
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

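/* Common worker for LRET and IRET in protected mode: shift selects
   the operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit). */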
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

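/* SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS
   MSR: CS and SS = CS + 8 on entry, CS + 16 and CS + 24 for the
   SYSEXIT return targets. */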
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

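/* Control register writes: CR0/CR3/CR4 go through the cpu_x86_update
   helpers so that cached mode state and the TLB stay consistent;
   CR8 maps to the APIC task priority register. */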
void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

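/* RDTSC faults with #GP when CR4.TSD is set and CPL != 0; RDPMC is
   only handled through the SVM intercept and is otherwise
   unimplemented. */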
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
        /* currently unimplemented */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
}

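/* WRMSR/RDMSR: ECX selects the MSR, the value travels in EDX:EAX.
   In user-only emulation these are no-ops. */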
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

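/* LSL/LAR (and VERR/VERW below) never fault: they report success or
   failure solely through ZF.  LSL returns the segment limit in T1,
   LAR the access rights. */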
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

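/* Record an FPU exception in the status word; if it is unmasked in
   the control word, also set the error summary and busy bits. */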
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

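/* Packed BCD: 9 bytes of 18 decimal digits (two per byte, least
   significant first), with the sign in the tenth byte. */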
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

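/* FXTRACT: ST0 is replaced by its unbiased exponent and the
   significand is pushed on top of the stack. */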
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

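/* FPREM1/FPREM compute the partial remainder of ST0 / ST1.  When the
   exponent difference is too large the reduction is done in steps and
   C2 is set to tell the program to iterate; the low quotient bits are
   reported in C0, C3 and C1. */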
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
3256
{
3257
    CPU86_LDouble dblq, fpsrcop, fptemp;
3258
    CPU86_LDoubleU fpsrcop1, fptemp1;
3259
    int expdif;
3260
    signed long long int q;
3261

    
3262
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3263
       ST0 = 0.0 / 0.0; /* NaN */
3264
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3265
       return;
3266
    }
3267

    
3268
    fpsrcop = (CPU86_LDouble)ST0;
3269
    fptemp = (CPU86_LDouble)ST1;
3270
    fpsrcop1.d = fpsrcop;
3271
    fptemp1.d = fptemp;
3272
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3273

    
3274
    if (expdif < 0) {
3275
        /* optimisation? taken from the AMD docs */
3276
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3277
        /* ST0 is unchanged */
3278
        return;
3279
    }
3280

    
3281
    if ( expdif < 53 ) {
3282
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3283
        /* round dblq towards zero */
3284
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3285
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3286

    
3287
        /* convert dblq to q by truncating towards zero */
3288
        if (dblq < 0.0)
3289
           q = (signed long long int)(-dblq);
3290
        else
3291
           q = (signed long long int)dblq;
3292

    
3293
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3294
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
3295
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
3296
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3297
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
3298
    } else {
3299
        int N = 32 + (expdif % 32); /* as per AMD docs */
3300
        env->fpus |= 0x400;  /* C2 <-- 1 */
3301
        fptemp = pow(2.0, (double)(expdif - N));
3302
        fpsrcop = (ST0 / ST1) / fptemp;
3303
        /* fpsrcop = integer obtained by chopping */
3304
        fpsrcop = (fpsrcop < 0.0) ?
3305
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3306
        ST0 -= (ST1 * fpsrcop * fptemp);
3307
    }
3308
}
3309

    
3310
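/* Note: FYL2XP1 exists because computing log2(1+x) through log2()
   directly loses precision when x is tiny; it is intended for small x,
   which is why the operand check below is on (x+1) > 0. */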
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

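/* Note: FXAM status word encodings used below, with C0 = 0x100,
   C1 = 0x200, C2 = 0x400, C3 = 0x4000: 0x500 (C0|C2) infinity,
   0x100 (C0) NaN, 0x4000 (C3) zero, 0x4400 (C3|C2) denormal and
   0x400 (C2) normal finite; C1 holds the sign in all cases. */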
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

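/* Note: the environment image stores a 2-bit tag per register:
   00 valid, 01 zero, 10 special (NaN, infinity or denormal), 11 empty.
   The loop below rebuilds that encoding from the fptags[] booleans and
   the register contents. */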
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

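/* Note: FSAVE is FSTENV followed by the eight 80-bit registers, then an
   implicit FNINIT: control word 0x37f (all exceptions masked, round to
   nearest, extended precision) and all tags set to empty. */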
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

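/* Note: FXSAVE image layout as used below: FCW at +0, FSW at +2, the
   abridged one-byte tag word at +4 (one bit per register, inverted:
   1 = valid), MXCSR at +0x18, the x87 registers in 16-byte slots from
   +0x20, and the XMM registers from +0xa0 (16 of them when data64 is
   set, 8 otherwise). */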
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

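/* Note: when the emulator uses plain doubles internally (no
   USE_X86LDOUBLE), the conversions below translate between the 64-bit
   format (52-bit mantissa, bias 1023) and the 80-bit format (explicit
   integer bit, 64-bit mantissa, bias 16383): the mantissa is shifted
   by 11 bits and the exponent rebiased. */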
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

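/* Note: div64 is a textbook restoring shift-subtract division of a
   128-bit dividend by a 64-bit divisor.  Each of the 64 iterations
   shifts one dividend bit into the running remainder (a1) and the
   resulting quotient bit into a0; on return *plow holds the quotient
   and *phigh the remainder.  Overflow is exactly the case where the
   high half of the dividend is >= the divisor. */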
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

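/* Note: for MUL the flags code only needs the high half, so CC_SRC is
   set to it (CF/OF follow from it being nonzero); for IMUL the result
   fits iff the high half equals the sign extension of the low half,
   which is what the CC_SRC comparison below records. */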
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

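/* Note: FPUC bits 10-11 select the rounding mode (RC_MASK) and bits
   8-9 the precision control: 00 single (32-bit), 10 double (64-bit),
   11 extended (80-bit); the reserved value 01 is treated as extended
   below. */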
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

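/* Note: each inclusion of softmmu_template.h instantiates the
   slow-path load/store helpers for one access size; SHIFT 0/1/2/3
   correspond to 1/2/4/8-byte accesses. */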
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

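/* Note: STGI/CLGI toggle the Global Interrupt Flag.  While GIF is
   clear (between CLGI and STGI, and across the world switch) the
   processor holds off interrupts; here that is tracked with
   HF_GIF_MASK in env->hflags. */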
void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

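/* Note: the VMCB stores segment attributes in a compressed 12-bit form
   (descriptor bits 40-47 and 52-55 with the gap removed); the two
   converters below move between that layout and the flat attribute
   dword used in CPUX86State, also extracting the base and limit bits
   scattered through the descriptor. */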
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

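/* Note: helper_vmrun below performs the VMRUN world switch in four
   steps: save the host state into the hsave page, load the intercept
   bitmaps and guest state from the VMCB at 'addr', optionally inject
   the event queued in control.event_inj, then re-enter the main loop
   as the guest. */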
extern uint8_t *phys_ram_base;
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

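/* Note: the I/O permission map uses one bit per port, while the MSR
   permission map uses two bits per MSR (read and write) split over
   three 2K regions: MSRs 0-0x1fff, 0xc0000000-0xc0001fff and
   0xc0010000-0xc0011fff; the T0/T1 arithmetic below derives the bit
   and byte offset within that map. */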
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

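/* Note: vmexit is the reverse world switch: it records the exit reason
   and guest state in the VMCB, reloads the host state from the hsave
   page, clears GIF, and returns to the instruction following VMRUN in
   the host. */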
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif