target-i386/helper.c @ a35f3ec7

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
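
/* PF depends only on the low 8 bits of the result: CC_P is set when
   the byte contains an even number of 1 bits (e.g. 0x03 -> CC_P,
   0x01 -> 0). The table could equally be generated at startup; a
   minimal sketch, illustrative only and not compiled: */
#if 0
static void gen_parity_table(uint8_t *tab)
{
    int i, j, bits;

    for(i = 0; i < 256; i++) {
        /* count the set bits of the byte value i */
        bits = 0;
        for(j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        /* PF (CC_P) is set for even parity */
        tab[i] = (bits & 1) ? 0 : CC_P;
    }
}
#endif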

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
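
/* RCL rotates through CF, so a 16 bit rotate cycles over 17 bit
   positions and an 8 bit one over 9. The tables reduce the masked
   5 bit shift count without a runtime modulo: rclw_table[i] == i % 17
   and rclb_table[i] == i % 9 for i in 0..31 (e.g. a count of 18
   behaves like a count of 1 for "rclw"). */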

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
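
/* x87 constants as loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2,
   FLDL2E and FLDL2T; the translator presumably indexes this table
   when emitting those instructions. */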

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
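
/* Selector layout: bits 15..3 index the descriptor table, bit 2 (TI)
   selects the LDT instead of the GDT, and bits 1..0 are the RPL, so
   "selector & ~7" is the byte offset of the 8 byte descriptor. It is
   returned as two 32 bit words: e1 (low) and e2 (high). */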

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
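
/* Worked example: a flat 4 GiB code segment with e1 = 0x0000ffff and
   e2 = 0x00cf9b00 has the G bit set and a raw limit of 0xfffff, so
   get_seg_limit() returns (0xfffff << 12) | 0xfff = 0xffffffff, while
   all three base fields are zero and get_seg_base() returns 0. */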

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
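
/* The index computation matches the architectural TSS layout: in a
   32 bit TSS (shift == 1) ESPn is at offset 8 * dpl + 4 and SSn at
   8 * dpl + 8; in a 16 bit TSS (shift == 0) SPn is at 4 * dpl + 2 and
   SSn at 4 * dpl + 4. */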

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
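
/* Summary of the task switch emulated above: validate the new TSS,
   save the outgoing context into the current TSS, update the busy
   bits (and the back link plus NT for CALL), then load CR3, EFLAGS,
   the general registers, the LDT and finally the segment registers
   from the new TSS, raising #TS or #GP on any inconsistency. */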

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
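
/* I/O permission bitmap: the 16 bit word at TSS offset 0x66 is the
   bitmap base; port N is governed by bit (N & 7) of byte (N >> 3).
   Example: a 1 byte access to port 0x3f8 tests bit 0 of the byte at
   io_offset + 0x7f, and a 2 byte access to port 7 needs bit 7 of
   byte 0 and bit 0 of byte 1 both clear, hence the two byte load
   above. */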

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
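
/* The mask comes from the stack segment's B bit: 32 bit stacks use
   the whole of ESP, 16 bit stacks only SP. In the TARGET_X86_64
   build a 32 bit stack write must zero-extend into RSP (as a 32 bit
   register write does on hardware), which is why the simple and/or
   form of the 32 bit build is not sufficient there. */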

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
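
/* Resulting stack frame: for an inner privilege gate the new stack
   from the TSS receives SS, ESP, EFLAGS, CS, EIP and optionally the
   error code (GS/FS/DS/ES are pushed first and then cleared when
   coming from vm86 mode); a same privilege gate only pushes
   EFLAGS/CS/EIP plus the error code on the current stack. */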

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
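
/* 64 bit TSS layout: RSP0..RSP2 live at offsets 4/12/20 and
   IST1..IST7 start at offset 36, each 8 bytes wide, i.e. exactly
   8 * level + 4 with levels 0..2 for RSPn and 4..10 for ISTn
   (callers pass ist + 3). */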

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
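
/* SYSCALL summary: the kernel CS selector comes from STAR[47:32] and
   SS is that selector + 8. In long mode the return RIP goes to RCX
   and RFLAGS to R11, RFLAGS is masked with SFMASK (env->fmask) and
   execution continues at LSTAR (or CSTAR when called from
   compatibility mode); legacy mode just clears IF/RF/VM and jumps to
   the low 32 bits of STAR. */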

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
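
/* Real mode vectors are 4 byte IVT entries (offset then selector)
   read at intno * 4, and only FLAGS, CS and IP are pushed, always on
   a 16 bit stack. Example: INT 0x10 fetches the new CS:IP from
   idt.base + 0x40. */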

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
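
/* Vectors 0 (#DE) and 10..13 (#TS, #NP, #SS, #GP) form the
   "contributory" class: two of them in sequence, or a page fault
   followed by a contributory fault or another page fault, escalate
   to #DF; a further fault while #DF is pending is a triple fault,
   modelled here as cpu_abort(). */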

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
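
/* The save state map occupies the top of SMRAM (SMBASE + 0x8000 up
   to SMBASE + 0xffff, hence the 0x7xxx offsets relative to sm_state)
   and execution resumes at SMBASE + 0x8000 in a real mode like
   environment with CS base equal to SMBASE. */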

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
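
/* 32-bit DIV/IDIV: EDX:EAX is divided by the 32-bit operand.  A zero
   divisor and a quotient that does not fit in 32 bits both raise the
   divide-error exception (#DE). */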
void helper_divl_EAX_T0(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
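
/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match the memory operand is replaced by ECX:EBX and ZF is set,
   otherwise the memory value is loaded into EDX:EAX and ZF is
   cleared. */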
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
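
/* ENTER with a non-zero nesting level copies level - 1 frame pointers
   from the old frame onto the new stack before pushing the new frame
   pointer (held in T1 here), as specified for the x86 ENTER
   instruction. */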
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
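
/* LLDT: the selector must reference the GDT (TI bit clear) and a
   present LDT descriptor (system type 2).  In long mode system
   descriptors are 16 bytes, hence the larger entry_limit. */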
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
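
/* A far jump through a call gate does not change the privilege level:
   the gate supplies the target CS selector and entry offset (a 386
   gate carries a 32-bit offset), and the target code segment must be
   reachable from the current CPL. */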

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
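
/* A far call through a call gate to a more privileged, non-conforming
   code segment switches to the inner stack taken from the TSS and
   copies param_count parameters from the caller's stack before
   pushing the return address. */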

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
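
/* Common tail for IRET and LRET: pop CS:EIP (plus EFLAGS for IRET),
   and when returning to an outer privilege level also pop SS:ESP and
   invalidate data segment registers that the outer level may not
   access. */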

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
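
/* SYSENTER/SYSEXIT load flat code and stack segments derived from the
   SYSENTER_CS MSR (CS and CS + 8 for ring 0, CS + 16 and CS + 24 for
   ring 3), so no descriptor table lookup is performed. */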
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        env->cr[8] = T0;
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
        /* currently unimplemented */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
}
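
/* WRMSR/RDMSR: ECX selects the MSR and EDX:EAX carries the 64-bit
   value.  In user-only emulation these are no-ops.  For MSR_EFER only
   the bits backed by reported CPUID features may be modified. */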
2765

    
2766
#if defined(CONFIG_USER_ONLY)
2767
void helper_wrmsr(void)
2768
{
2769
}
2770

    
2771
void helper_rdmsr(void)
2772
{
2773
}
2774
#else
2775
void helper_wrmsr(void)
2776
{
2777
    uint64_t val;
2778

    
2779
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2780

    
2781
    switch((uint32_t)ECX) {
2782
    case MSR_IA32_SYSENTER_CS:
2783
        env->sysenter_cs = val & 0xffff;
2784
        break;
2785
    case MSR_IA32_SYSENTER_ESP:
2786
        env->sysenter_esp = val;
2787
        break;
2788
    case MSR_IA32_SYSENTER_EIP:
2789
        env->sysenter_eip = val;
2790
        break;
2791
    case MSR_IA32_APICBASE:
2792
        cpu_set_apic_base(env, val);
2793
        break;
2794
    case MSR_EFER:
2795
        {
2796
            uint64_t update_mask;
2797
            update_mask = 0;
2798
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2799
                update_mask |= MSR_EFER_SCE;
2800
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2801
                update_mask |= MSR_EFER_LME;
2802
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2803
                update_mask |= MSR_EFER_FFXSR;
2804
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2805
                update_mask |= MSR_EFER_NXE;
2806
            env->efer = (env->efer & ~update_mask) |
2807
            (val & update_mask);
2808
        }
2809
        break;
2810
    case MSR_STAR:
2811
        env->star = val;
2812
        break;
2813
    case MSR_PAT:
2814
        env->pat = val;
2815
        break;
2816
    case MSR_VM_HSAVE_PA:
2817
        env->vm_hsave = val;
2818
        break;
2819
#ifdef TARGET_X86_64
2820
    case MSR_LSTAR:
2821
        env->lstar = val;
2822
        break;
2823
    case MSR_CSTAR:
2824
        env->cstar = val;
2825
        break;
2826
    case MSR_FMASK:
2827
        env->fmask = val;
2828
        break;
2829
    case MSR_FSBASE:
2830
        env->segs[R_FS].base = val;
2831
        break;
2832
    case MSR_GSBASE:
2833
        env->segs[R_GS].base = val;
2834
        break;
2835
    case MSR_KERNELGSBASE:
2836
        env->kernelgsbase = val;
2837
        break;
2838
#endif
2839
    default:
2840
        /* XXX: exception ? */
2841
        break;
2842
    }
2843
}
2844

    
2845
void helper_rdmsr(void)
2846
{
2847
    uint64_t val;
2848
    switch((uint32_t)ECX) {
2849
    case MSR_IA32_SYSENTER_CS:
2850
        val = env->sysenter_cs;
2851
        break;
2852
    case MSR_IA32_SYSENTER_ESP:
2853
        val = env->sysenter_esp;
2854
        break;
2855
    case MSR_IA32_SYSENTER_EIP:
2856
        val = env->sysenter_eip;
2857
        break;
2858
    case MSR_IA32_APICBASE:
2859
        val = cpu_get_apic_base(env);
2860
        break;
2861
    case MSR_EFER:
2862
        val = env->efer;
2863
        break;
2864
    case MSR_STAR:
2865
        val = env->star;
2866
        break;
2867
    case MSR_PAT:
2868
        val = env->pat;
2869
        break;
2870
    case MSR_VM_HSAVE_PA:
2871
        val = env->vm_hsave;
2872
        break;
2873
#ifdef TARGET_X86_64
2874
    case MSR_LSTAR:
2875
        val = env->lstar;
2876
        break;
2877
    case MSR_CSTAR:
2878
        val = env->cstar;
2879
        break;
2880
    case MSR_FMASK:
2881
        val = env->fmask;
2882
        break;
2883
    case MSR_FSBASE:
2884
        val = env->segs[R_FS].base;
2885
        break;
2886
    case MSR_GSBASE:
2887
        val = env->segs[R_GS].base;
2888
        break;
2889
    case MSR_KERNELGSBASE:
2890
        val = env->kernelgsbase;
2891
        break;
2892
#endif
2893
    default:
2894
        /* XXX: exception ? */
2895
        val = 0;
2896
        break;
2897
    }
2898
    EAX = (uint32_t)(val);
2899
    EDX = (uint32_t)(val >> 32);
2900
}
2901
#endif
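
/* LSL, LAR, VERR and VERW do not fault on failure: they report the
   outcome through ZF, which is why these helpers update CC_SRC with
   or without CC_Z instead of raising an exception. */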

void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
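
/* The transcendental helpers below approximate the x87 operations
   with the host libm (pow, log, tan, atan2), so results may differ
   slightly from real 80-bit hardware. */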
3148

    
3149
void helper_f2xm1(void)
3150
{
3151
    ST0 = pow(2.0,ST0) - 1.0;
3152
}
3153

    
3154
void helper_fyl2x(void)
3155
{
3156
    CPU86_LDouble fptemp;
3157

    
3158
    fptemp = ST0;
3159
    if (fptemp>0.0){
3160
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
3161
        ST1 *= fptemp;
3162
        fpop();
3163
    } else {
3164
        env->fpus &= (~0x4700);
3165
        env->fpus |= 0x400;
3166
    }
3167
}
3168

    
3169
void helper_fptan(void)
3170
{
3171
    CPU86_LDouble fptemp;
3172

    
3173
    fptemp = ST0;
3174
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3175
        env->fpus |= 0x400;
3176
    } else {
3177
        ST0 = tan(fptemp);
3178
        fpush();
3179
        ST0 = 1.0;
3180
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3181
        /* the above code is for  |arg| < 2**52 only */
3182
    }
3183
}
3184

    
3185
void helper_fpatan(void)
3186
{
3187
    CPU86_LDouble fptemp, fpsrcop;
3188

    
3189
    fpsrcop = ST1;
3190
    fptemp = ST0;
3191
    ST1 = atan2(fpsrcop,fptemp);
3192
    fpop();
3193
}
3194

    
3195
void helper_fxtract(void)
3196
{
3197
    CPU86_LDoubleU temp;
3198
    unsigned int expdif;
3199

    
3200
    temp.d = ST0;
3201
    expdif = EXPD(temp) - EXPBIAS;
3202
    /*DP exponent bias*/
3203
    ST0 = expdif;
3204
    fpush();
3205
    BIASEXPONENT(temp);
3206
    ST0 = temp.d;
3207
}
3208

    
3209
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

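/* helper_fprem below differs from helper_fprem1 above only in how the
   quotient is rounded: FPREM truncates towards zero while FPREM1 rounds
   to nearest.  In both, C2 = 1 signals an incomplete reduction. */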
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

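/* FNSTENV image: 28 bytes in 32-bit mode (FCW, FSW, FTW, FIP, FCS, FOO,
   FOS stored as dwords), 14 bytes in 16-bit mode (the same fields as
   words).  The instruction and operand pointers are left as zero here. */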
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

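/* Only the empty/non-empty distinction of the tag word is tracked in
   env->fptags; helper_fstenv recomputes the full 2-bit tags from the
   register contents on the way out. */
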
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

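/* FXSAVE/FXRSTOR use the 512-byte layout: FCW at +0, FSW at +2, the
   abridged one-bit-per-register tag word at +4, MXCSR at +0x18, the
   x87/MMX registers at +0x20 in 16-byte slots and the XMM registers
   starting at +0xa0. */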
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

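/* Conversions between the 64-bit host double and the 80-bit x87 format.
   The extended format keeps the integer bit of the significand explicit,
   hence the (1LL << 63) below when USE_X86LDOUBLE is not defined. */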
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

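/* 128-bit arithmetic helpers for the 64-bit MUL/IMUL/DIV/IDIV paths:
   a value is kept as a (low, high) pair of uint64_t. */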
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

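/* For the multiply helpers below, CC_DST receives the low half of the
   result and CC_SRC either the high half (MULQ) or a 0/1 overflow
   indicator (IMULQ, which checks that the high half is the sign
   extension of the low half); the flag computation derives CF/OF
   from CC_SRC. */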
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

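/* Hardware raises #DE both for division by zero and when the quotient
   does not fit in 64 bits; div64/idiv64 report the latter through their
   return value. */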
void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

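/* Propagate the x87 control word into the softfloat status: bits 10-11
   (RC) select the rounding mode and bits 8-9 (PC) the rounding
   precision, mapped below to a 32/64/80-bit format. */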
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

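/* The softmmu_template.h inclusions above instantiate the slow-path
   load/store helpers for 1, 2, 4 and 8 byte accesses (SHIFT 0..3);
   on a TLB miss they call tlb_fill() below. */
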
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

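/* The VMCB stores segment attributes in a packed 12-bit form; the two
   helpers below move them between that form and the descriptor-style
   attribute word used internally (Type/S/DPL/P in bits 8-15, AVL/L/DB/G
   in bits 20-23). */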
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

extern uint8_t *phys_ram_base;
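/* VMRUN: save the host state into the hsave area, load the guest state
   from the VMCB at 'addr', then enter the guest until the next #VMEXIT. */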
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

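/* Check whether the current VMCB intercepts the given event type; if so
   leave guest mode via vmexit() and return 1, otherwise return 0. */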
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
            if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* 2 bits per MSR: compute bit and byte offset */
                T0 = (ECX * 2) % 8;
                T1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

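/* #VMEXIT: write the guest state and the exit code back into the VMCB,
   then reload the host state from the hsave area. */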
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif