/* target-i386/helper.c @ 00f82b8a */
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

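/* Parity-flag lookup table: entry i is CC_P when byte value i contains
   an even number of set bits, so PF can be computed by indexing this
   table with the low 8 bits of a result instead of recomputing it. */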
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

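/* x87 constants in the order expected by the FLD constant-load
   helpers: 0.0, 1.0, pi, log10(2), ln(2), log2(e) and log2(10). */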
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

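/* Decode the two 32-bit words of a segment descriptor: the 20-bit
   limit is scaled to 4K pages when the G bit is set, and the 32-bit
   base is scattered across both words. */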
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

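/* Fetch the privilege-level-dpl stack (SS:ESP) from the current TSS;
   the TSS type selects between the 16-bit and 32-bit layouts. */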
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

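/* Hardware task switch. Saves the current register state into the old
   TSS, updates the busy bit and the NT flag according to the switch
   source (JMP, CALL or IRET), then loads EIP, EFLAGS, the general
   registers, the LDT and the segment registers from the new TSS. */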
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

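/* I/O permission check: consult the I/O bitmap referenced at offset
   0x66 of a valid 32-bit TSS; one bit per port, and every bit
   covering the 'size'-byte access must be clear to allow it. */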
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

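/* The stack-size mask from get_sp_mask() decides how much of ESP is
   significant; SET_ESP updates only the masked part, so 16-bit stacks
   leave the high half of ESP untouched. */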
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

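/* Protected-mode interrupt/exception dispatch: look up the IDT gate,
   check gate and target code-segment privileges, switch to the
   inner-ring stack from the TSS when the privilege level changes,
   push the return frame (plus error code, if any) and jump to the
   handler. */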
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

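/* In long mode IDT entries are 16 bytes wide: the handler address is
   assembled from three descriptor words, and a non-zero IST field
   selects an alternate stack from the 64-bit TSS. */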
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

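/* SYSCALL: the new CS/SS selectors are derived from STAR[47:32], the
   return address is kept in (R/E)CX and, in long mode, RFLAGS in R11;
   the entry point comes from LSTAR/CSTAR (long mode) or STAR[31:0]. */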
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

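/* SYSRET: return to CPL 3 with selectors derived from STAR[63:48]; in
   long mode RFLAGS is restored from R11 and dflag == 2 selects a
   return to 64-bit code. */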
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

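/* Real-mode vectors are 4-byte segment:offset pairs read straight from
   the IVT; only FLAGS, CS and IP are pushed on the 16-bit stack. */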
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

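/* Exceptions 0 and 10-13 are contributory: two contributory faults in
   a row, or a page fault followed by one, escalate to a double fault,
   and any fault while handling a double fault aborts with a triple
   fault. */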
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

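/* Enter System Management Mode: the CPU state is saved to the SMRAM
   save area at smbase + 0x8000 (the 32-bit and 64-bit save maps use
   different offsets), then execution restarts at smbase + 0x8000 in a
   flat real-mode-like environment. */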
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

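/* RSM: restore the saved state from SMRAM and leave SMM. smbase itself
   is only reloaded when the save-map revision advertises SMBASE
   relocation (bit 17 of the revision ID). */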
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

1593
#endif /* !CONFIG_USER_ONLY */
1594

    
1595

    
1596
#ifdef BUGGY_GCC_DIV64
1597
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1598
   call it from another function */
1599
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1600
{
1601
    *q_ptr = num / den;
1602
    return num % den;
1603
}
1604

    
1605
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1606
{
1607
    *q_ptr = num / den;
1608
    return num % den;
1609
}
1610
#endif
1611

    
1612
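/* 32-bit divide helpers: the 64-bit dividend EDX:EAX is divided by a
   32-bit operand; a zero divisor or a quotient that does not fit in
   32 bits raises the divide error exception, as on real hardware. */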
void helper_divl_EAX_T0(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

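/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand at A0.
   On a match, store ECX:EBX there and set ZF; otherwise load the
   memory value into EDX:EAX and clear ZF. */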
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

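/* CPUID: dispatch on the leaf number in EAX.  Basic leaves are capped
   at cpuid_level and extended leaves (bit 31 set) at cpuid_xlevel;
   out-of-range requests fall back to the highest basic leaf. */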
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
#  if defined(USE_KQEMU)
        EAX = 0x00003020;        /* 48 bits virtual, 32 bits physical */
#  else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028;        /* 48 bits virtual, 40 bits physical */
#  endif
#else
# if defined(USE_KQEMU)
        EAX = 0x00000020;        /* 32 bits physical */
#  else
        EAX = 0x00000024;        /* 36 bits physical */
#  endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

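/* Helper for the nesting-level part of ENTER: copy the enclosing
   frame pointers from the old frame and push the new frame pointer
   held in T1.  The 32-bit and 16-bit paths differ only in operand
   size; stack accesses are masked with the SS size mask. */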
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

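/* LLDT: the selector must reference a present LDT descriptor (system
   type 2) in the GDT.  In long mode the descriptor is 16 bytes, so
   the third longword supplies bits 63..32 of the base. */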
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

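/* LTR: like LLDT, but the descriptor must be an available TSS
   (type 1 or 9); the descriptor is then marked busy in place. */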
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
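/* A far call through a call gate to a more privileged non-conforming
   code segment switches to the inner stack taken from the TSS and
   copies 'param_count' parameters from the old stack; same-privilege
   calls push CS:EIP on the current stack. */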
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
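/* Common tail shared by LRET and IRET: pop CS:EIP (and EFLAGS for
   IRET), check the return code segment, and when returning to an
   outer privilege level also pop SS:ESP and revalidate the data
   segment registers. */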
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

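/* SYSENTER/SYSEXIT: fast system calls.  The target CS and SS are
   derived from the SYSENTER_CS MSR at fixed selector offsets and
   loaded as flat 4 GB segments; ESP/EIP come from the
   SYSENTER_ESP/EIP MSRs (or from ECX/EDX on SYSEXIT). */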
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        env->cr[8] = T0;
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
        /* currently unimplemented */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
}

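/* RDMSR/WRMSR: the MSR index is taken from ECX and the 64-bit value
   is passed in EDX:EAX.  In user-mode emulation these are no-ops. */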
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

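/* LSL/LAR/VERR/VERW do not fault on an inaccessible descriptor: they
   report the result of the access check in ZF, returning the segment
   limit (LSL) or the access rights bytes (LAR) in T1 on success. */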
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

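/* Record a floating point exception in the status word; if it is not
   masked in the control word, also set the error summary and busy
   bits so that fpu_raise_exception() can deliver it. */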
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

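/* FBLD/FBST operate on 80-bit packed BCD: nine bytes of two decimal
   digits each (least significant first) plus a sign byte whose top
   bit gives the sign. */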
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

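/* The transcendental helpers below approximate the x87 instructions
   with the host libm in double precision, so results may differ
   slightly from real 80-bit hardware. */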
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
3226
{
3227
    CPU86_LDouble dblq, fpsrcop, fptemp;
3228
    CPU86_LDoubleU fpsrcop1, fptemp1;
3229
    int expdif;
3230
    signed long long int q;
3231

    
3232
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3233
        ST0 = 0.0 / 0.0; /* NaN */
3234
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3235
        return;
3236
    }
3237

    
3238
    fpsrcop = ST0;
3239
    fptemp = ST1;
3240
    fpsrcop1.d = fpsrcop;
3241
    fptemp1.d = fptemp;
3242
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3243

    
3244
    if (expdif < 0) {
3245
        /* optimisation? taken from the AMD docs */
3246
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3247
        /* ST0 is unchanged */
3248
        return;
3249
    }
3250

    
3251
    if (expdif < 53) {
3252
        dblq = fpsrcop / fptemp;
3253
        /* round dblq towards nearest integer */
3254
        dblq = rint(dblq);
3255
        ST0 = fpsrcop - fptemp * dblq;
3256

    
3257
        /* convert dblq to q by truncating towards zero */
3258
        if (dblq < 0.0)
3259
           q = (signed long long int)(-dblq);
3260
        else
3261
           q = (signed long long int)dblq;
3262

    
3263
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3264
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
3265
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
3266
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3267
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
3268
    } else {
3269
        env->fpus |= 0x400;  /* C2 <-- 1 */
3270
        fptemp = pow(2.0, expdif - 50);
3271
        fpsrcop = (ST0 / ST1) / fptemp;
3272
        /* fpsrcop = integer obtained by chopping */
3273
        fpsrcop = (fpsrcop < 0.0) ?
3274
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3275
        ST0 -= (ST1 * fpsrcop * fptemp);
3276
    }
3277
}
3278

    
3279
void helper_fprem(void)
3280
{
3281
    CPU86_LDouble dblq, fpsrcop, fptemp;
3282
    CPU86_LDoubleU fpsrcop1, fptemp1;
3283
    int expdif;
3284
    signed long long int q;
3285

    
3286
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3287
       ST0 = 0.0 / 0.0; /* NaN */
3288
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3289
       return;
3290
    }
3291

    
3292
    fpsrcop = (CPU86_LDouble)ST0;
3293
    fptemp = (CPU86_LDouble)ST1;
3294
    fpsrcop1.d = fpsrcop;
3295
    fptemp1.d = fptemp;
3296
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3297

    
3298
    if (expdif < 0) {
3299
        /* optimisation? taken from the AMD docs */
3300
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3301
        /* ST0 is unchanged */
3302
        return;
3303
    }
3304

    
3305
    if ( expdif < 53 ) {
3306
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3307
        /* round dblq towards zero */
3308
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3309
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3310

    
3311
        /* convert dblq to q by truncating towards zero */
3312
        if (dblq < 0.0)
3313
           q = (signed long long int)(-dblq);
3314
        else
3315
           q = (signed long long int)dblq;
3316

    
3317
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3318
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
3319
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
3320
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3321
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
3322
    } else {
3323
        int N = 32 + (expdif % 32); /* as per AMD docs */
3324
        env->fpus |= 0x400;  /* C2 <-- 1 */
3325
        fptemp = pow(2.0, (double)(expdif - N));
3326
        fpsrcop = (ST0 / ST1) / fptemp;
3327
        /* fpsrcop = integer obtained by chopping */
3328
        fpsrcop = (fpsrcop < 0.0) ?
3329
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3330
        ST0 -= (ST1 * fpsrcop * fptemp);
3331
    }
3332
}
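
/* Worked example (added for clarity): both fprem variants above report the
   three low bits of the integer quotient q through the condition codes,
   mapped as (C0,C3,C1) <-- (q2,q1,q0).  C0 is status-word bit 8, C1 bit 9
   and C3 bit 14, hence the shifts (q & 4) << 6, (q & 2) << 13 and
   (q & 1) << 9.  For q = 5 (binary 101) this gives C0=1, C3=0, C1=1.
   A set C2 instead means the reduction was only partial (expdif >= 53)
   and the instruction must be repeated. */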

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
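
/* Note (added for clarity): the constants in helper_fxam_ST0 above encode
   the FXAM classification in (C3,C2,C0), with the sign reported in C1:

       0x100  (C0)       NaN
       0x400  (C2)       normal finite
       0x500  (C2|C0)    infinity
       0x4000 (C3)       zero
       0x4400 (C3|C2)    denormal

   where C0 is status-word bit 8, C1 bit 9, C2 bit 10 and C3 bit 14.
   As the XXX above notes, the empty class (C3|C0) is not reported yet. */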

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
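
/* Note (added for clarity): helper_fstenv above rebuilds the full x87 tag
   word with two bits per physical register i at bit position 2*i:
   00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal) and
   11 = empty.  env->fptags[] only tracks empty/non-empty, so the finer
   classes are recomputed from the register contents on the fly. */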

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}
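
/* Note (added for clarity): unlike FSTENV, FXSAVE stores an abridged tag
   word with a single bit per register (1 = valid, 0 = empty).  Since
   env->fptags[] uses the opposite sense (1 = empty), helper_fxsave above
   writes fptag ^ 0xff, and helper_fxrstor below undoes the inversion. */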

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
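
/* Worked example (added for clarity): the two routines above convert
   between the double layout (1 sign bit, 11 exponent bits biased by
   EXPBIAS, 52 fraction bits) and the 80-bit extended layout (15 exponent
   bits biased by 16383, 64 mantissa bits with an explicit integer bit).
   For f = 1.0, EXPD = EXPBIAS and MANTD = 0, so cpu_get_fp80 produces
   *pmant = 0x8000000000000000 (just the integer bit) and *pexp = 0x3fff;
   cpu_set_fp80 shifts the mantissa back down by 11 and rebiases. */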

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
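
/* Note (added for clarity): div64 above is plain restoring division of a
   128-bit dividend (a1:a0) by a 64-bit divisor, one quotient bit per
   iteration: shift (a1,a0) left, subtract b from the high half when it
   fits, and shift that quotient bit into a0.  After 64 steps a0 holds the
   quotient and a1 the remainder.  The early a1 >= b exit mirrors the
   architectural rule that a quotient wider than 64 bits is an overflow. */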

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}
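
/* Worked example (added for clarity): the VMCB stores segment attributes
   in 12 bits (bits 0-7 = type/S/DPL/P, bits 8-11 = AVL/L/DB/G), while the
   CPU's cached descriptor flags keep the same fields at bits 8-15 and
   20-23.  For a flat 32-bit code segment with vmcb_attrib = 0x0c9b,
   vmcb2cpu_attrib yields (0x9b << 8) | (0x0c00 << 12) = 0x00c09b00 before
   the base and limit pieces are merged in; cpu2vmcb_attrib inverts the
   attribute part of that mapping. */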

void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
            if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}
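
/* Note (added for clarity): the SVM_EXIT_MSR case above consults the MSR
   permission bitmap, which dedicates two bits per MSR (read bit, then
   write bit) in three 2-Kbyte regions covering MSRs 0x0-0x1fff,
   0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  T1 is the byte offset
   into the map, T0 the bit offset within that byte, and param selects the
   read (0) or write (1) bit; a set bit forces a #VMEXIT. */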

void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif