target-i386/helper.c @ 80210bcd

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
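
/* Illustrative sketch (an addition, not original code): parity_table[i]
   is CC_P exactly when i has an even number of set bits, matching the
   x86 PF definition over the low 8 bits of a result. A table like the
   one above could be regenerated or verified with: */
#if 0
{
    int i, j, bits;
    for (i = 0; i < 256; i++) {
        for (bits = 0, j = 0; j < 8; j++)
            bits += (i >> j) & 1;
        assert(parity_table[i] == ((bits & 1) ? 0 : CC_P));
    }
}
#endif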

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
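
/* Illustrative note (an addition, not original code): RCL rotates through
   CF, so a 16-bit RCL effectively rotates a 17-bit quantity and an 8-bit
   RCL a 9-bit one; the tables above reduce a 5-bit rotate count, i.e.
   rclw_table[c] == c % 17 and rclb_table[c] == c % 9 for c in [0, 31]. */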

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
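
/* Illustrative worked example (an addition, not original code): for the
   descriptor words e1 = 0x0000ffff, e2 = 0x00cf9a00 (a flat code
   segment), get_seg_base() returns 0 and, since DESC_G_MASK is set,
   get_seg_limit() returns (0x000fffff << 12) | 0xfff = 0xffffffff,
   i.e. a 4 GiB limit. */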

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
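
/* Illustrative note (an addition, not original code): with shift == 1 the
   index (dpl * 4 + 2) << 1 selects the (esp, ss) pair of a 32-bit TSS at
   offset 4 + dpl * 8, e.g. esp0/ss0 at 0x04/0x08; with shift == 0 it
   selects the packed 16-bit pair at offset 2 + dpl * 4. */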

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
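
    /* Illustrative summary (an addition, not original code) of the 32-bit
       TSS offsets used above: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28..0x44
       EAX..EDI, 0x48.. the segment selectors, 0x60 the LDT selector and
       0x64 the trap/I-O map word. In the documented 286 TSS the four
       selectors sit at 0x22/0x24/0x26/0x28 with a 2-byte stride and the
       LDT selector at 0x2a, so the 4-byte stride in the 16-bit selector
       accesses above looks suspect. */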

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16-bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
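
/* Illustrative worked example (an addition, not original code): for a
   1-byte access to port 0x3f9, io_offset picks up the bitmap word
   (0x3f9 >> 3) = 0x7f bytes past the I/O map base, val >>= (0x3f9 & 7)
   moves the port's bit into bit 0 and mask = 0x1; the access is allowed
   only if that bit is clear. A 2-byte access at the same port would need
   two adjacent clear bits (mask = 0x3). */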

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
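
/* Illustrative note (an addition, not original code): on TARGET_X86_64 a
   32-bit stack pointer update must zero-extend into the full RSP (the
   explicit (uint32_t)(val) case above), while a 16-bit update replaces
   only the low 16 bits; the 32-bit-only build can use a single masked
   merge for both cases. */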

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
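
/* Illustrative summary (an addition, not original code) of the frame
   built above with a 32-bit gate: on a privilege change the new stack
   receives SS and ESP (preceded by GS/FS/DS/ES when coming from vm86),
   then always EFLAGS, CS, EIP and, for some exceptions, the error code;
   the 16-bit gate path pushes the same sequence as words. */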

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
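
/* Illustrative note (an addition, not original code): in the 64-bit TSS,
   index = 8 * level + 4 places RSP0..RSP2 at offsets 0x04/0x0c/0x14, and
   the get_rsp_from_tss(ist + 3) calls below map IST1..IST7 to offsets
   0x24..0x54. */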

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
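
/* Illustrative summary (an addition, not original code): SYSCALL takes
   its CS selector from STAR[47:32] (SS is that selector + 8) and
   branches to LSTAR in 64-bit code or CSTAR in compatibility mode,
   masking EFLAGS with SFMASK (env->fmask); outside long mode the target
   EIP is the low 32 bits of STAR. SYSRET below uses STAR[63:48] for the
   return selectors. */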

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
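
/* Illustrative worked example (an addition, not original code): in real
   mode, vector n occupies 4 bytes at the IDT base + n * 4, the new IP
   first and then the new CS; e.g. INT 0x10 loads IP from offset 0x40 and
   CS from 0x42 of the vector table. */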

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
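
/* Illustrative example (an addition, not original code): a contributory
   fault such as #GP (13) raised while delivering another contributory
   fault such as #NP (11) is promoted to #DF with error code 0, as is any
   contributory or page fault raised while delivering #PF; a further
   fault while delivering #DF aborts with a triple fault. */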

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
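
/* Illustrative note (an addition, not original code): the save-state map
   written above is based at SMBASE + 0x8000 and indexed by offsets up to
   0x7ff8; the handler starts with CS base = SMBASE and EIP = 0x8000,
   with paging and protection disabled. The 64-bit map keeps the segment
   descriptors at 0x7e00 + i * 16 and the GPRs downwards from 0x7ff8. */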

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
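
/* 32-bit divide of EDX:EAX by T0 as used by 'div'/'idiv': quotient to
   EAX, remainder to EDX.  A zero divisor, or a quotient that does not
   fit in 32 bits, raises #DE (the signed helper checks for int32_t
   overflow instead). */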
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
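
/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at A0; on a match
   store ECX:EBX there and set ZF, otherwise load the old value into
   EDX:EAX and clear ZF. */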
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq(A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}
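
/* CPUID: dispatch on the leaf number in EAX; out-of-range leaves are
   first clamped to the highest supported index. */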
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
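
/* ENTER with a non-zero nesting level: copy the enclosing frame
   pointers from the old frame, then push the new frame pointer (T1).
   A 64-bit variant follows below. */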
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
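
/* LLDT: load the local descriptor table register.  In long mode LDT
   descriptors are 16 bytes wide, hence the larger entry_limit. */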
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
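
/* LTR: load the task register and mark the TSS descriptor busy. */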
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
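
/* IRET in protected mode: with NT set this is a return from a nested
   task, otherwise a normal stack-based return. */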
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
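
/* SYSENTER: fast system call entry.  CS and SS are loaded with flat
   descriptors derived from the SYSENTER_CS MSR; SYSENTER_CS == 0 means
   the instruction is not usable and raises #GP. */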
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
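
/* SYSEXIT: fast return to CPL 3; only legal from CPL 0 with a non-zero
   SYSENTER_CS. */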
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_movl_crN_T0(int reg)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    case 8:
        cpu_set_apic_tpr(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
#endif
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
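
/* RDTSC is privileged when CR4.TSD is set and CPL != 0. */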
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
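
/* WRMSR/RDMSR: the MSR index in ECX selects which env field is written
   or read, with the 64-bit value passed in EDX:EAX.  In user-only mode
   MSR accesses are silently ignored. */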
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
                (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
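
/* LSL: load the segment limit of the selector in T0 into T1 and set
   ZF, or clear ZF if the descriptor is not accessible at the current
   privilege level. */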
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC = eflags | CC_Z;
}
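
/* LAR: like LSL, but returns the access rights bytes of the descriptor
   instead of its limit. */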
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC = eflags | CC_Z;
}
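
/* VERR/VERW: set ZF if the segment is readable (resp. writable) at the
   current privilege level, clear it otherwise. */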
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    eflags = cc_table[CC_OP].compute_all();
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

/* BCD ops */
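
/* FBLD/FBST use 10-byte packed BCD: 18 decimal digits, two per byte,
   with the sign in bit 7 of the last byte. */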
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
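
/* The transcendental helpers below approximate the x87 instructions
   with host libm calls in double precision, so the low bits of the
   80-bit results are not exact. */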
void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);  /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
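
/* FPREM1/FPREM: partial remainder, IEEE and truncating variants.  For
   large exponent differences only a partial reduction is performed and
   C2 is set so that the guest restarts the instruction. */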
3173

    
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop /* ST0 */ / fptemp /* ST1 */;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop /* ST0 */ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

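/* When C2 is left set above (expdif >= 53), only a partial reduction has
   been done: software is expected to re-execute FPREM/FPREM1 until C2
   clears, at which point C0/C3/C1 hold the three low bits of the
   quotient. */
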
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

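/* Example for helper_fscale above: FSCALE computes ST0 * 2^trunc(ST1),
   so ST0 = 3.0 and ST1 = 2.5 give 3.0 * 2^2 = 12.0. */
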
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500; /* Infinity */
        else
            env->fpus |= 0x100; /* NaN */
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000; /* Zero */
        else
            env->fpus |= 0x4400; /* Denormal */
    } else {
        env->fpus |= 0x400;
    }
}

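/* FXAM class encoding produced above, as (C3,C2,C0): NaN = 001 (0x100),
   normal = 010 (0x400), infinity = 011 (0x500), zero = 100 (0x4000),
   denormal = 110 (0x4400); C1 reports the sign.  The empty class (101)
   is never reported because fptags is not consulted (see the XXX). */
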
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

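/* 32-bit protected-mode environment image written above: +0 control
   word, +4 status word (with TOP merged in), +8 tag word, then the
   instruction and operand pointers (+12 FIP, +16 FCS, +20 FDP, +24 FDS),
   which this implementation simply zeroes. */
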
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

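/* FXSAVE image offsets used above: +0 FCW, +2 FSW, +4 the abridged tag
   word (one "valid" bit per register, hence fptag ^ 0xff), +0x18 MXCSR,
   +0x1c MXCSR_MASK, +0x20 the eight ST/MMX registers in 16-byte slots,
   +0xa0 the XMM registers. */
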
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

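/* In the double-precision build these helpers convert between the IEEE
   double layout (52 mantissa bits, bias EXPBIAS) and the 80-bit x87
   layout (64 mantissa bits with an explicit integer bit, bias 16383):
   the 11-bit shifts realign the mantissa and (1LL << 63) supplies the
   explicit integer bit. */
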
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

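/* div64 above is a textbook restoring (shift-and-subtract) division:
   each of the 64 iterations shifts one dividend bit into the running
   remainder a1 and the resulting quotient bit into a0, so on exit
   *plow holds the quotient and *phigh the remainder. */
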
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(void)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, T0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

void helper_imulq_T0_T1(void)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, T0, T1);
    T0 = r0;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

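/* For the signed multiplies, CC_SRC is nonzero exactly when the high
   half is not the sign extension of the low half, i.e. when the 128-bit
   product does not fit in 64 bits; the flag helpers derive CF/OF from
   it. */
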
void helper_divq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX_T0(void)
{
    uint64_t r0, r1;

    if (T0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, T0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_bswapq_T0(void)
{
    T0 = bswap64(T0);
}
#endif

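/* Note that quotient overflow from div64/idiv64 raises EXCP00_DIVZ as
   well: on real hardware both divide-by-zero and an out-of-range
   quotient deliver the same #DE exception. */
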
void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

float approx_rcp(float a)
{
    return 1.0 / a;
}

void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch (env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch ((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

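/* FPUC bits 10-11 (RC) select the rounding mode decoded above; bits 8-9
   are the precision control (0 = single, 2 = double, 3 = extended),
   which softfloat models as a 32/64/80-bit rounding precision. */
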
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#ifdef __s390__
# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
#else
# define GETPC() (__builtin_return_address(0))
#endif

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}

void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(target_ulong addr) { }
void helper_vmmcall(void) { }
void helper_vmload(target_ulong addr) { }
void helper_vmsave(target_ulong addr) { }
void helper_skinit(void) { }
void helper_invlpga(void) { }
void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    return 0;
}

#else

static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    return    ((vmcb_attrib & 0x00ff) << 8)          /* Type, S, DPL, P */
            | ((vmcb_attrib & 0x0f00) << 12)         /* AVL, L, DB, G */
            | ((vmcb_base >> 16) & 0xff)             /* Base 23-16 */
            | (vmcb_base & 0xff000000)               /* Base 31-24 */
            | (vmcb_limit & 0xf0000);                /* Limit 19-16 */
}

static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    return    ((cpu_attrib >> 8) & 0xff)             /* Type, S, DPL, P */
            | ((cpu_attrib & 0xf00000) >> 12);       /* AVL, L, DB, G */
}

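/* VMRUN in outline: save the host state into the hsave area, load the
   guest state from the VMCB at addr, optionally inject a pending event,
   then run the guest until an intercepted operation triggers a
   #VMEXIT. */
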
extern uint8_t *phys_ram_base;
void helper_vmrun(target_ulong addr)
{
    uint32_t event_inj;
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    regs_to_env();

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept            = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = int_ctl & V_TPR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    regs_to_env();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}

void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmmcall!\n");
}

void helper_vmload(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(target_ulong addr)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "skinit!\n");
}

void helper_invlpga(void)
{
    tlb_flush(env, 0);
}

int svm_check_intercept_param(uint32_t type, uint64_t param)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
        if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
        if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
        if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    case SVM_EXIT_IOIO:
        if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
            uint16_t port = (uint16_t)(param >> 16);

            if (ldub_phys(addr + port / 8) & (1 << (port % 8)))
                vmexit(type, param);
        }
        break;

    case SVM_EXIT_MSR:
        if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                T0 = (ECX * 2) % 8;
                T1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                T0 = (8192 + ECX - 0xc0000000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                T0 = (16384 + ECX - 0xc0010000) * 2;
                T1 = (T0 / 8);
                T0 %= 8;
                break;
            default:
                vmexit(type, param);
                return 1;
            }
            if (ldub_phys(addr + T1) & ((1 << param) << T0))
                vmexit(type, param);
            return 1;
        }
        break;
    default:
        if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
            vmexit(type, param);
            return 1;
        }
        break;
    }
    return 0;
}

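/* The MSR permission map checked above holds two intercept bits per MSR
   (read, then write) for three 8K-MSR ranges; T1/T0 are the byte offset
   and bit position, and param (0 = rdmsr, 1 = wrmsr) selects the read or
   write bit via (1 << param) << T0. */
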
void vmexit(uint64_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK)
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    regs_to_env();
    cpu_loop_exit();
}

#endif