root / target-i386 / op_helper.c @ 8167ee88

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

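/* Parity-flag lookup: entry i is CC_P when byte value i contains an even
   number of set bits (x86 PF is even parity of the low 8 result bits). */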
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table: a 16-bit RCL rotates through 17 bits (16 data bits
   plus CF), so rotate counts are reduced modulo 17 */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table: an 8-bit RCL rotates through 9 bits (8 data bits
   plus CF), so rotate counts are reduced modulo 9 */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

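/* x87 constants, in the order 0, 1, pi, log10(2), ln(2), log2(e),
   log2(10); presumably indexed by the FLDxx constant-load helpers. */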
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

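/* Fetch the inner-level SS:ESP pair for privilege level 'dpl' from the
   current TSS; the TSS type selects the 16-bit or 32-bit layout. */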
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* for data or non-conforming code, check the privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
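/* Hardware task switch: save the current context into the old TSS and
   load the new one.  'source' distinguishes JMP/IRET (which clear the
   old busy bit) from CALL (which sets NT and back-links to the old TSS). */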
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as loading the full descriptors
           may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if port I/O is allowed by the TSS I/O permission bitmap */
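/* The 16-bit value at TSS offset 0x66 is the I/O bitmap base; each bit
   denies one port, and an access of 'size' bytes must find all 'size'
   bits clear.  The 16-bit load below handles a check that straddles a
   byte boundary. */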
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

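/* exceptions 8 (#DF), 10-14 (#TS..#PF) and 17 (#AC) push an error code */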
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

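/* Write back ESP while preserving the bits outside the stack-size mask:
   a 16-bit stack keeps the upper bits of ESP; on x86-64, a 32-bit store
   zero-extends and a 64-bit store writes RSP in full. */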
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* on 64-bit machines this addition can overflow, so this segment
 * addition macro can be used to trim the value to 32 bits whenever
 * needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
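/* Dispatch through the IDT: a task gate (type 5) triggers a hardware
   task switch, while 286/386 interrupt and trap gates push a return
   frame, switching to the inner-level stack from the TSS when the
   privilege level drops. */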
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

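/* Read RSP0-2 (level = 0..2) or an IST pointer (level = ist + 3) from
   the 64-bit TSS. */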
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag, trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
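    /* SYSCALL: the kernel CS selector comes from STAR[47:32]; SS is
       implicitly that selector + 8 */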
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
1074
    if (env->hflags & HF_LMA_MASK) {
1075
        if (dflag == 2) {
1076
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1077
                                   0, 0xffffffff,
1078
                                   DESC_G_MASK | DESC_P_MASK |
1079
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1080
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1081
                                   DESC_L_MASK);
1082
            env->eip = ECX;
1083
        } else {
1084
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1085
                                   0, 0xffffffff,
1086
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1087
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1089
            env->eip = (uint32_t)ECX;
1090
        }
1091
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1092
                               0, 0xffffffff,
1093
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1094
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1095
                               DESC_W_MASK | DESC_A_MASK);
1096
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1097
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1098
        cpu_x86_set_cpl(env, 3);
1099
    } else {
1100
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1101
                               0, 0xffffffff,
1102
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1103
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1104
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1105
        env->eip = (uint32_t)ECX;
1106
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1107
                               0, 0xffffffff,
1108
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1109
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1110
                               DESC_W_MASK | DESC_A_MASK);
1111
        env->eflags |= IF_MASK;
1112
        cpu_x86_set_cpl(env, 3);
1113
    }
1114
#ifdef CONFIG_KQEMU
1115
    if (kqemu_is_ok(env)) {
1116
        if (env->hflags & HF_LMA_MASK)
1117
            CC_OP = CC_OP_EFLAGS;
1118
        env->exception_index = -1;
1119
        cpu_loop_exit();
1120
    }
1121
#endif
1122
}
1123
#endif
1124

    
1125
/* real mode interrupt */
1126
static void do_interrupt_real(int intno, int is_int, int error_code,
1127
                              unsigned int next_eip)
1128
{
1129
    SegmentCache *dt;
1130
    target_ulong ptr, ssp;
1131
    int selector;
1132
    uint32_t offset, esp;
1133
    uint32_t old_cs, old_eip;
1134

    
1135
    /* real mode (simpler !) */
1136
    dt = &env->idt;
1137
    if (intno * 4 + 3 > dt->limit)
1138
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1139
    ptr = dt->base + intno * 4;
1140
    offset = lduw_kernel(ptr);
1141
    selector = lduw_kernel(ptr + 2);
1142
    esp = ESP;
1143
    ssp = env->segs[R_SS].base;
1144
    if (is_int)
1145
        old_eip = next_eip;
1146
    else
1147
        old_eip = env->eip;
1148
    old_cs = env->segs[R_CS].selector;
1149
    /* XXX: use SS segment size ? */
1150
    PUSHW(ssp, esp, 0xffff, compute_eflags());
1151
    PUSHW(ssp, esp, 0xffff, old_cs);
1152
    PUSHW(ssp, esp, 0xffff, old_eip);
1153

    
1154
    /* update processor state */
1155
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
1156
    env->eip = offset;
1157
    env->segs[R_CS].selector = selector;
1158
    env->segs[R_CS].base = (selector << 4);
1159
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1160
}
1161

    
1162
/* fake user mode interrupt */
1163
void do_interrupt_user(int intno, int is_int, int error_code,
1164
                       target_ulong next_eip)
1165
{
1166
    SegmentCache *dt;
1167
    target_ulong ptr;
1168
    int dpl, cpl, shift;
1169
    uint32_t e2;
1170

    
1171
    dt = &env->idt;
1172
    if (env->hflags & HF_LMA_MASK) {
1173
        shift = 4;
1174
    } else {
1175
        shift = 3;
1176
    }
1177
    ptr = dt->base + (intno << shift);
1178
    e2 = ldl_kernel(ptr + 4);
1179

    
1180
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1181
    cpl = env->hflags & HF_CPL_MASK;
1182
    /* check privilege if software int */
1183
    if (is_int && dpl < cpl)
1184
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1185

    
1186
    /* Since we emulate only user space, we cannot do more than
1187
       exiting the emulation with the suitable exception and error
1188
       code */
1189
    if (is_int)
1190
        EIP = next_eip;
1191
}
1192

    
1193
#if !defined(CONFIG_USER_ONLY)
1194
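/* While running under SVM (guest mode), record the event being delivered
   in the VMCB's control.event_inj field, unless one is already pending;
   do_interrupt() clears the valid bit again once delivery has succeeded. */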
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
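    /* Vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are "contributory":
       two of them in a row, or a contributory fault (or #PF) raised while
       delivering #PF, escalates to a double fault. */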
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

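    /* The state save area sits at SMBASE + 0x8000; the offsets written
       below appear to follow the AMD64 SMM save map on 64-bit targets
       and the legacy i386 (revision 0x20000) map otherwise. */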
    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

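/* RSM: leave SMM by reloading the CPU state that do_smm_enter() saved
   in the state save area. */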
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
1572
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1573
    }
1574
#else
1575
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1576
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1577
    load_eflags(ldl_phys(sm_state + 0x7ff4),
1578
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1579
    env->eip = ldl_phys(sm_state + 0x7ff0);
1580
    EDI = ldl_phys(sm_state + 0x7fec);
1581
    ESI = ldl_phys(sm_state + 0x7fe8);
1582
    EBP = ldl_phys(sm_state + 0x7fe4);
1583
    ESP = ldl_phys(sm_state + 0x7fe0);
1584
    EBX = ldl_phys(sm_state + 0x7fdc);
1585
    EDX = ldl_phys(sm_state + 0x7fd8);
1586
    ECX = ldl_phys(sm_state + 0x7fd4);
1587
    EAX = ldl_phys(sm_state + 0x7fd0);
1588
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1589
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1590

    
1591
    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1592
    env->tr.base = ldl_phys(sm_state + 0x7f64);
1593
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
1594
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1595

    
1596
    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1597
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
1598
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1599
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1600

    
1601
    env->gdt.base = ldl_phys(sm_state + 0x7f74);
1602
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1603

    
1604
    env->idt.base = ldl_phys(sm_state + 0x7f58);
1605
    env->idt.limit = ldl_phys(sm_state + 0x7f54);
1606

    
1607
    for(i = 0; i < 6; i++) {
1608
        if (i < 3)
1609
            offset = 0x7f84 + i * 12;
1610
        else
1611
            offset = 0x7f2c + (i - 3) * 12;
1612
        cpu_x86_load_seg_cache(env, i,
1613
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1614
                               ldl_phys(sm_state + offset + 8),
1615
                               ldl_phys(sm_state + offset + 4),
1616
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1617
    }
1618
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1619

    
1620
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1621
    if (val & 0x20000) {
1622
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1623
    }
1624
#endif
1625
    CC_OP = CC_OP_EFLAGS;
1626
    env->hflags &= ~HF_SMM_MASK;
1627
    cpu_smm_update(env);
1628

    
1629
    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1630
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1631
}
1632

    
1633
#endif /* !CONFIG_USER_ONLY */
1634

    
1635

    
1636
/* division, flags are undefined */
1637

    
1638
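/* the dividend is implicit (AX, DX:AX or EDX:EAX) and the quotient and
   remainder are written back to the same registers; a zero divisor or
   a quotient that overflows the destination raises #DE (EXCP00_DIVZ) */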
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

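/* AAA/AAS adjust AL after an add/sub of unpacked BCD digits: if the low
   nibble is > 9 or AF is set, six is added (or subtracted) and the
   carry (or borrow) is propagated into AH; CF and AF track the adjust */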
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

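/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX and set ZF, otherwise clear ZF and load the old value
   into EDX:EAX.  The memory operand is written back even on mismatch,
   mirroring the unconditional write done by the hardware instruction */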
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

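/* slow path of ENTER with a non-zero nesting level: copy the enclosing
   frame pointers from the old frame, then push the new frame pointer
   (t1) for the innermost level */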
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

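/* LLDT: load the LDT register from a GDT descriptor; in long mode the
   descriptor is 16 bytes, so the upper half of the base is taken from
   the third descriptor word */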
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

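/* LTR: load the task register from an available TSS descriptor in the
   GDT and mark the descriptor busy */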
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode ret/iret: when is_iret is set this implements IRET
   (EFLAGS is popped and a return to vm86 mode is possible), otherwise
   a far RET popping 'addend' extra stack bytes after the return
   address */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

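/* SYSENTER: fast system call entry; CS and SS are loaded as flat
   segments derived from MSR_IA32_SYSENTER_CS and execution continues
   at the configured EIP/ESP with CPL 0 */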
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

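/* SYSEXIT: fast return to user mode at CPL 3, with the new stack
   pointer taken from ECX and the return EIP from EDX */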
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
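/* control register access; CR8 is mapped to the APIC task priority
   register unless SVM virtual interrupt masking (V_INTR) is active */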
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

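/* debug register writes: changing DR0-DR3 or DR7 requires the affected
   hardware breakpoints to be removed and re-inserted */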
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

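/* RDTSC is privileged when CR4.TSD is set and CPL != 0 */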
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

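/* RDMSR/WRMSR: the MSR index is taken from ECX and the 64-bit value is
   passed in or returned through EDX:EAX */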
#if defined(CONFIG_USER_ONLY)
3017
void helper_wrmsr(void)
3018
{
3019
}
3020

    
3021
void helper_rdmsr(void)
3022
{
3023
}
3024
#else
3025
void helper_wrmsr(void)
3026
{
3027
    uint64_t val;
3028

    
3029
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3030

    
3031
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3032

    
3033
    switch((uint32_t)ECX) {
3034
    case MSR_IA32_SYSENTER_CS:
3035
        env->sysenter_cs = val & 0xffff;
3036
        break;
3037
    case MSR_IA32_SYSENTER_ESP:
3038
        env->sysenter_esp = val;
3039
        break;
3040
    case MSR_IA32_SYSENTER_EIP:
3041
        env->sysenter_eip = val;
3042
        break;
3043
    case MSR_IA32_APICBASE:
3044
        cpu_set_apic_base(env, val);
3045
        break;
3046
    case MSR_EFER:
3047
        {
3048
            uint64_t update_mask;
3049
            update_mask = 0;
3050
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3051
                update_mask |= MSR_EFER_SCE;
3052
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3053
                update_mask |= MSR_EFER_LME;
3054
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3055
                update_mask |= MSR_EFER_FFXSR;
3056
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3057
                update_mask |= MSR_EFER_NXE;
3058
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3059
                update_mask |= MSR_EFER_SVME;
3060
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3061
                update_mask |= MSR_EFER_FFXSR;
3062
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
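        /* each MC bank is a group of four consecutive MSRs
           (CTL/STATUS/ADDR/MISC), hence the factor of 4 applied to
           the bank count held in MCG_CAP[7:0] */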
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef CONFIG_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
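    /* RDMSR returns the 64-bit MSR value split across EDX:EAX */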
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

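/* selector checks shared by lsl/lar/verr/verw: these instructions do
   not fault on a bad selector, they report the result through ZF
   (cleared on failure, set on success) */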
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

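/* indexed by floatx_compare() result + 1 (less, equal, greater,
   unordered) and gives the matching C3/C2/C0 status word bits */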
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

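/* propagate the guest FPU control word into the softfloat status:
   the RC field (bits 10-11) selects the rounding mode and the PC
   field (bits 8-9) the rounding precision */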
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

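/* x86 packed BCD operands: bytes 0-8 hold 18 decimal digits, two per
   byte with the least significant byte first; byte 9 carries the
   sign (0x80 set for negative) */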
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

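/* fxtract: ST0 is replaced by its unbiased exponent and the
   significand (with its exponent forced back to the bias) is pushed
   on top of it */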
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

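/* fprem1/fprem: when the exponent difference is 53 or more the
   remainder cannot be produced in a single step, so only a partial
   reduction is done and C2 is set to tell software to iterate */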
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

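/* FXSAVE image layout used below: FPU control/status/tag words at
   offset 0, FP/MMX registers in 16-byte slots from 0x20, MXCSR at
   0x18 and the XMM registers from 0xa0 */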
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
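/* 128/64 -> 64 bit unsigned division, done as a 64-iteration
   restoring shift-subtract loop; overflow is reported when the high
   half of the dividend is not smaller than the divisor */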
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
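/* in user-mode emulation there is no system state to virtualize, so
   the SVM helpers are stubs */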

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

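/* the VMCB stores segment attributes in a packed 12-bit format; the
   shifts below convert between that and the flag layout kept in
   SegmentCache */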
static inline void svm_save_seg(target_phys_addr_t addr,
4901
                                const SegmentCache *sc)
4902
{
4903
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
4904
             sc->selector);
4905
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
4906
             sc->base);
4907
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
4908
             sc->limit);
4909
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
4910
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4911
}
4912
                                
4913
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4914
{
4915
    unsigned int flags;
4916

    
4917
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4918
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4919
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4920
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4921
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4922
}
4923

    
4924
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4925
                                      CPUState *env, int seg_reg)
4926
{
4927
    SegmentCache sc1, *sc = &sc1;
4928
    svm_load_seg(addr, sc);
4929
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4930
                           sc->base, sc->limit, sc->flags);
4931
}
4932

    
4933
void helper_vmrun(int aflag, int next_eip_addend)
4934
{
4935
    target_ulong addr;
4936
    uint32_t event_inj;
4937
    uint32_t int_ctl;
4938

    
4939
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4940

    
4941
    if (aflag == 2)
4942
        addr = EAX;
4943
    else
4944
        addr = (uint32_t)EAX;
4945

    
4946
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4947

    
4948
    env->vm_vmcb = addr;
4949

    
4950
    /* save the current CPU state in the hsave page */
4951
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4952
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4953

    
4954
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4955
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4956

    
4957
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4958
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4959
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4960
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4961
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4962
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4963

    
4964
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4965
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4966

    
4967
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4968
                  &env->segs[R_ES]);
4969
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4970
                 &env->segs[R_CS]);
4971
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4972
                 &env->segs[R_SS]);
4973
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4974
                 &env->segs[R_DS]);
4975

    
4976
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4977
             EIP + next_eip_addend);
4978
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4979
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4980

    
4981
    /* load the interception bitmaps so we do not need to access the
4982
       vmcb in svm mode */
4983
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4984
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4985
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4986
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4987
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4988
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4989

    
4990
    /* enable intercepts */
4991
    env->hflags |= HF_SVMI_MASK;
4992

    
4993
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4994

    
4995
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4996
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4997

    
4998
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4999
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5000

    
5001
    /* clear exit_info_2 so we behave like the real hardware */
5002
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5003

    
5004
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5005
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5006
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5007
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5008
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5009
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5010
    if (int_ctl & V_INTR_MASKING_MASK) {
5011
        env->v_tpr = int_ctl & V_TPR_MASK;
5012
        env->hflags2 |= HF2_VINTR_MASK;
5013
        if (env->eflags & IF_MASK)
5014
            env->hflags2 |= HF2_HIF_MASK;
5015
    }
5016

    
5017
    cpu_load_efer(env, 
5018
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5019
    env->eflags = 0;
5020
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5021
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5022
    CC_OP = CC_OP_EFLAGS;
5023

    
5024
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5025
                       env, R_ES);
5026
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5027
                       env, R_CS);
5028
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5029
                       env, R_SS);
5030
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5031
                       env, R_DS);
5032

    
5033
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5034
    env->eip = EIP;
5035
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5036
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5037
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5038
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5039
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5040

    
5041
    /* FIXME: guest state consistency checks */
5042

    
5043
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5044
        case TLB_CONTROL_DO_NOTHING:
5045
            break;
5046
        case TLB_CONTROL_FLUSH_ALL_ASID:
5047
            /* FIXME: this is not 100% correct but should work for now */
5048
            tlb_flush(env, 1);
5049
        break;
5050
    }
5051

    
5052
    env->hflags2 |= HF2_GIF_MASK;
5053

    
5054
    if (int_ctl & V_IRQ_MASK) {
5055
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5056
    }
5057

    
5058
    /* maybe we need to inject an event */
5059
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5060
    if (event_inj & SVM_EVTINJ_VALID) {
5061
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5062
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5063
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5064

    
5065
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5066
        /* FIXME: need to implement valid_err */
5067
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

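/* VMLOAD copies the extra guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSRs) from the VMCB addressed by rAX into the CPU;
   helper_vmsave below is its exact mirror image. */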
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

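/* STGI/CLGI set and clear the Global Interrupt Flag, which QEMU tracks
   as HF2_GIF_MASK in hflags2. */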
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to check whether the flush is needed */
    tlb_flush_page(env, addr);
}

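/* Check the intercept bitmaps that vmrun copied out of the VMCB and
   raise a #VMEXIT if the current operation is intercepted. */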
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two permission bits per MSR: compute the bit offset
                   first, then split it into byte and bit indices, as
                   the other ranges below do */
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

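/* I/O intercept check: the I/O permission map holds one bit per port.
   Bits 4..6 of param encode the access size in bytes, so the computed
   mask covers every byte touched by a multi-byte access. */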
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
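/* #VMEXIT: write the guest state back into the VMCB, reload the host
   state that vmrun stashed in vm_hsave, record the exit reason, and
   return to the host through cpu_loop_exit(). */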
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

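    /* Reflect the current virtual TPR and any still-pending virtual
       interrupt back into the VMCB's int_ctl field for the host. */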
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* EFER must be loaded after the control registers so that the
       hidden hflags get recomputed properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
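/* The x87 and MMX register files alias each other: entering MMX mode
   resets the stack top and marks all eight tags valid (0), while EMMS
   marks them empty (1 per tag, hence the 0x01010101 pattern below). */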
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

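/* ops_sse.h is instantiated twice: SHIFT 0 builds the 64-bit MMX
   flavour of each operation, SHIFT 1 the 128-bit SSE flavour. */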
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

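/* helper_template.h likewise expands once per operand size: SHIFT
   0/1/2/3 generate the 8-, 16-, 32- and 64-bit helpers. */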
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
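/* Note: the translated code tests the operand against zero before
   calling these helpers, so the loops below always terminate. */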
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

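/* Lazy condition-code evaluation: CC_OP records which operation last
   set the flags, and the compute_* helpers rebuild all of EFLAGS (or
   just CF) from CC_SRC/CC_DST on demand.  CC_OP_EFLAGS means the
   flags are already materialized in CC_SRC. */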
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

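/* Same dispatch as above, but only the carry flag is wanted, so
   several operand sizes can share one helper: INC/DEC never modify CF
   (the saved carry sits in CC_SRC), and all MUL sizes reduce to
   compute_c_mull. */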
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}