/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
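
/* Note: parity_table is indexed with the low 8 bits of a result and
   yields CC_P when that byte contains an even number of set bits, as
   the x86 PF flag requires; e.g. parity_table[0x00] == CC_P,
   parity_table[0x01] == 0, parity_table[0x03] == CC_P. */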

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
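
/* Note: RCL/RCR on 16 and 8 bit operands rotate through CF, i.e. over
   17 or 9 bits, so the shift count (already masked to 5 bits) is
   reduced with these tables; e.g. rclw_table[18] == 1 (18 mod 17) and
   rclb_table[10] == 1 (10 mod 9). */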

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
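
/* Note: these are the seven constants loadable by the FPU constant
   instructions, in order: FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E
   and FLDL2T. */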

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
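
/* Example: for the classic flat code descriptor e1 = 0x0000ffff,
   e2 = 0x00cf9a00, get_seg_base() returns 0 and get_seg_limit()
   returns 0xffffffff (raw limit 0xfffff scaled by the G bit). */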

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
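
/* Example: in vm86 mode a selector is just a paragraph number, so
   loading 0x1234 yields base 0x12340 with the fixed 64KB limit. */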

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
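
/* Note: index = (dpl * 4 + 2) << shift matches the TSS layouts: in a
   32-bit TSS (shift == 1), dpl 0 gives offset 4 for ESP0 and 8 for
   SS0; in a 16-bit TSS (shift == 0), dpl 0 gives offset 2 for SP0 and
   4 for SS0. */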

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just the selectors, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
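
/* Note: in switch_tss() the descriptor type distinguishes the TSS
   flavours: type bit 3 set means a 32-bit TSS (104 bytes, limit at
   least 103), clear means a 16-bit TSS (44 bytes, limit at least 43);
   type bit 1 is the busy bit toggled around the switch. */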

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
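
/* Example: for a byte access to port 0x3f8 the bitmap byte at
   io_offset + (0x3f8 >> 3) is fetched and bit (0x3f8 & 7) == 0 must
   be clear; a 4-byte access at port 6 needs bits 6..9 clear, which
   spans two bitmap bytes and is why two bytes are always read. */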

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
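
/* Note: the three x86_64 cases above mirror the hardware behaviour:
   a 16-bit stack replaces only SP, a 32-bit stack zero-extends ESP
   into RSP, and a 64-bit stack assigns RSP directly. */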

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
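
/* Example: with sp_mask 0xffff these macros wrap within the 64KB
   stack segment, so pushing a word at sp == 0 stores it at
   ssp + 0xfffe. */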

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
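
/* Note: for the gate types accepted above, bit 3 of the type selects
   the gate size (6/7 are 16-bit gates, 14/15 are 32-bit ones, hence
   shift = type >> 3) and bit 0 selects trap vs. interrupt gate, which
   is why (type & 1) == 0 clears IF. */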

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
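
/* Example: index = 8 * level + 4 maps both RSPn and ISTn slots of the
   64-bit TSS: level 0 reads RSP0 at offset 4, and an IST slot n is
   fetched with level n + 3, e.g. IST1 at offset 0x24. */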

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
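
/* Note: as in the code above, SYSCALL takes the new CS selector from
   STAR[47:32] (SS is that value + 8), while SYSRET below uses
   STAR[63:48]; in long mode the target RIP comes from LSTAR or CSTAR
   and the RFLAGS bits listed in FMASK are cleared. */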

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
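
/* Example: a real mode vector is four bytes in the IVT, 16-bit IP
   then 16-bit CS, so INT 0x10 reads its target from linear addresses
   0x40 and 0x42 when the IDT base is 0. */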

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
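
/* Example: a #GP (13) raised while delivering a #NP (11) involves two
   contributory exceptions and is promoted to #DF (8) with error code
   0; a second #PF while delivering a #PF is promoted the same way. */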

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
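
/* Note: bit 17 (0x20000) of the revision id advertises SMBASE
   relocation; helper_rsm() below only reloads env->smbase from the
   saved state when that bit is set. */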

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1591

    
1592
#endif /* !CONFIG_USER_ONLY */
1593

    
1594

    
1595
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

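/* 32-bit divide helpers: the dividend is the 64-bit value EDX:EAX
   and the divisor is T0.  A zero divisor, or a quotient that does
   not fit in 32 bits, raises #DE (EXCP00_DIVZ).  For example, with
   unsigned inputs EDX=0, EAX=100 and T0=7 the result is EAX=14 and
   EDX=2 (the remainder). */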
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

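/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX.  If they
   match, store ECX:EBX there and set ZF; otherwise load the memory
   value into EDX:EAX and clear ZF.  The other flags are preserved by
   recomputing the lazily-evaluated eflags first. */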
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

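/* CPUID dispatch on the leaf index in EAX.  An index above the
   supported maximum (cpuid_level for basic leaves, cpuid_xlevel for
   the 0x8000xxxx extended range) is clamped back to the basic
   maximum, roughly the fall-back behaviour documented for real
   CPUs. */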
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

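/* ENTER with a non-zero nesting level: copy 'level - 1' frame
   pointers from the old frame down onto the new stack, then push the
   new frame pointer itself (passed in T1).  Every stack access is
   masked with the SS stack-pointer mask so 16-bit stack wraparound
   is honoured. */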
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

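/* LLDT / LTR: load the LDT or task register from a GDT selector.
   Both check that the selector names the right system descriptor
   type and that it is present.  In long mode system descriptors are
   16 bytes, so entry_limit grows from 7 to 15 and the high 32 bits
   of the base come from the third descriptor word. */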
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

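/* A protected mode far jump is either a direct jump to a code
   segment (with separate conforming/non-conforming privilege
   checks), a jump through a task gate or TSS descriptor (a task
   switch), or a jump through a call gate, which redirects to the
   code segment stored in the gate. */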
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

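/* Protected mode far call.  The interesting path is a call gate to a
   more privileged (numerically lower DPL) code segment: a new stack
   is fetched from the TSS, the old SS:ESP are pushed on it, up to 31
   parameters are copied across (param_count is the low 5 bits of the
   gate descriptor), and only then are CS:EIP pushed and the CPL
   lowered. */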
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

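/* Common tail for LRET and IRET in protected mode: pop CS:EIP (plus
   EFLAGS when is_iret is set), and on a return to an outer privilege
   level also pop SS:ESP and re-validate the data segment registers
   so that no segment more privileged than the new CPL survives the
   return.  An IRET whose popped EFLAGS has VM set instead falls
   through to the vm86 restore path at the end. */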
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

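/* SYSENTER/SYSEXIT fast system calls: the target CS and SS selectors
   are derived from the SYSENTER_CS MSR (consecutive GDT slots at +8,
   +16 and +24), the segments are loaded as flat 4 GB segments, and
   the CPL switches directly between 3 and 0 with no stack-based
   state save; the entry points come from the SYSENTER ESP/EIP MSRs
   and, on exit, from ECX/EDX. */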
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

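/* WRMSR/RDMSR: ECX selects the MSR and the 64-bit value travels in
   EDX:EAX.  Only the MSRs the emulation cares about are handled;
   unknown ones are silently ignored on write and read back as zero
   (see the XXX notes in the default cases).  In user-only builds
   both helpers are no-ops. */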
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                        (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

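/* Selector inspection helpers (LSL, LAR, VERR, VERW): each performs
   the architectural DPL/RPL/type checks and reports success by
   setting ZF, leaving the other lazily-computed flags untouched.  On
   success LSL returns the segment limit in T1 and LAR the
   access-rights bytes (e2 & 0x00f0ff00). */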
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

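/* FPU exception plumbing: fpu_set_exception() sets status-word bits
   and, when the exception is unmasked in the control word, also sets
   the summary and busy bits.  fpu_raise_exception() then delivers it
   either as #MF (when CR0.NE is set) or through the external FERR
   mechanism on older-style systems. */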
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

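/* FPREM/FPREM1 compute the partial remainder ST0 mod ST1.  When the
   exponent difference is below 53 the reduction completes in one
   pass and the low quotient bits land in C0/C3/C1; otherwise only a
   partial reduction is done and C2 is set to tell the caller to
   iterate.  FPREM truncates the quotient towards zero, FPREM1 rounds
   it to nearest. */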
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

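/* FPREM: legacy partial remainder; same scheme as FPREM1 except that
   the quotient is chopped towards zero instead of rounded to nearest. */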
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

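/* FXAM: classify ST0 and report the class in the condition codes:
   C0 = NaN, C2 = normal finite, C3 = zero, C3+C2 = denormal,
   C2+C0 = infinity; C1 receives the sign bit. */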
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

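/* FSTENV/FLDENV: save or reload the FPU environment (control word,
   status word with the top-of-stack field folded in, and the
   2-bit-per-register tag word) in the 16-bit (14-byte) or 32-bit
   (28-byte) memory layout. */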
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

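/* FXSAVE/FXRSTOR: save or restore FPU and SSE state in the (partially
   implemented here) 512-byte FXSAVE image. The tag word is compressed
   to one bit per register and stored inverted; the x87 registers live
   at offset 0x20 in 16-byte slots, the XMM registers at offset 0xa0. */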
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

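/* Convert between the host double representation and the 80-bit x87
   format (64-bit mantissa with explicit integer bit, 15-bit exponent
   biased by 16383, sign in bit 15 of the upper word). */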
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

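/* 128/64-bit unsigned division by restoring shift-and-subtract: the
   128-bit dividend in (*phigh, *plow) is shifted left one bit at a
   time, subtracting b whenever the running high part reaches it;
   quotient bits accumulate in *plow and the remainder ends up in
   *phigh. */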
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

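/* 64-bit MUL/IMUL: the 128-bit product goes to EDX:EAX. CC_SRC records
   what the lazy flag computation needs: the raw high half for MUL, and
   for IMUL whether the high half differs from the sign extension of
   the low half. */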
void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;
    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

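/* Propagate the rounding control (bits 10-11) and, where floatx80 is
   available, the precision control (bits 8-9) fields of the x87
   control word into the softfloat status, so that subsequent FPU
   operations round accordingly. */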
void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}


/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

extern uint8_t *phys_ram_base;
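/* VMRUN: world switch into the guest. The host context is saved to the
   hsave page, guest state and the intercept bitmaps are loaded from
   the VMCB, any pending event injection is performed, and execution
   resumes at the guest rIP. */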
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

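/* Check whether the current guest operation is intercepted. CR/DR
   accesses and exceptions are tested against the bitmaps cached at
   vmrun time; IOIO and MSR accesses consult the permission maps
   referenced by the VMCB and trigger a #VMEXIT when the matching
   permission bit is set. */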
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t) (param >> 16);

            if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

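/* #VMEXIT: the reverse world switch. Guest state is written back to
   the VMCB together with the exit code and exit information, the host
   context is reloaded from the hsave page, and control returns to the
   instruction following VMRUN. */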
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
       env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif