root / target-i386 / op_helper.c @ 63a54736

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
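
/* Illustrative note (not in the original source): parity_table is indexed
 * by the low 8 bits of a result and yields CC_P when that byte has even
 * parity, e.g.:
 *
 *     parity_table[0x00] == CC_P   (zero set bits -> even parity, PF=1)
 *     parity_table[0x01] == 0      (one set bit  -> odd parity,  PF=0)
 *     parity_table[0x03] == CC_P   (two set bits -> even parity, PF=1)
 *
 * so a flags helper can compute PF as parity_table[(uint8_t)result].
 */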

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
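
/* Illustrative note (not in the original source): RCL rotates through CF,
 * so a 16-bit RCL works on a 17-bit quantity and an 8-bit RCL on a 9-bit
 * one.  The tables above reduce a shift count that was already masked to
 * 5 bits, e.g. for RCLW a count of 20 behaves like 20 mod 17 = 3:
 *
 *     rclw_table[20] == 3
 *     rclb_table[10] == 1      (RCLB: 10 mod 9 = 1)
 */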

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
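
/* Illustrative note (not in the original source): e1/e2 are the low and
 * high 32-bit words of an 8-byte segment descriptor.  For example, the
 * flat code descriptor 0x00cf9a000000ffff (e2=0x00cf9a00, e1=0x0000ffff)
 * decodes as:
 *
 *     get_seg_base(e1, e2)  == 0x00000000
 *     get_seg_limit(e1, e2) == 0xffffffff   (limit 0xfffff, G set -> pages)
 */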

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if it is a task gate, read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can cause problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
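
/* Illustrative note (not in the original source): the 32-bit TSS offsets
 * used above are the architectural ones, e.g.:
 *
 *     0x1c  CR3          0x20  EIP         0x24  EFLAGS
 *     0x28  EAX..EDI     0x48  ES..GS      0x60  LDT selector
 *     0x64  T bit / I/O map base
 *
 * The 16-bit (286) TSS uses the corresponding 2-byte slots starting at
 * 0x0e for IP, which is why the minimum limits checked above are 103
 * (104-byte 32-bit TSS) and 43 (44-byte 16-bit TSS).
 */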

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
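
/* Illustrative note (not in the original source): for an INW on port 0x3f9
 * (addr=0x3f9, size=2), the word holding the permission bits sits at
 * bitmap_base + (0x3f9 >> 3) = bitmap_base + 0x7f, and after the shift the
 * two bits tested are bits 1 and 2 of that word (0x3f9 & 7 == 1); the
 * access is allowed only if both are zero.
 */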

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            return 1;
        }
        return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
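
/* Illustrative note (not in the original source): SET_ESP only updates the
 * bits selected by the stack-size mask, e.g. with a 16-bit stack
 * (sp_mask == 0xffff) and ESP == 0x12345678:
 *
 *     SET_ESP(0xdead, 0xffff);   now ESP == 0x1234dead
 *
 * On x86_64 the masks are compared explicitly because the 64-bit case
 * must store the full value into RSP unmasked.
 */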

/* On 64-bit machines this can overflow, so this segment addition macro
 * can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
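
/* Illustrative usage (not in the original source): these macros update the
 * local stack pointer copy in place, so a sequence such as
 *
 *     esp = ESP;
 *     PUSHL(ssp, esp, sp_mask, error_code);    decrements esp by 4, stores
 *     POPL(ssp, esp, sp_mask, val);            reloads it, re-increments
 *
 * leaves esp unchanged overall; callers commit the final value with
 * SET_ESP(esp, sp_mask) once all pushes have succeeded.
 */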

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
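
/* Illustrative note (not in the original source): for a 32-bit gate
 * (shift == 1) taken at the same privilege level, the frame built above
 * is, from higher to lower addresses:
 *
 *     EFLAGS, CS, EIP [, error code]
 *
 * A privilege change additionally pushes SS:ESP before those (plus
 * GS/FS/DS/ES when coming from vm86 mode).
 */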

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
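
/* Illustrative note (not in the original source): helper_syscall and
 * helper_sysret derive their selectors from the STAR MSR, which packs
 *
 *     STAR[47:32]  kernel CS on SYSCALL (SS is that selector + 8)
 *     STAR[63:48]  base selector for SYSRET (user CS/SS)
 *
 * In long mode the target RIP comes from LSTAR (64-bit callers) or CSTAR
 * (compatibility mode), with RFLAGS bits cleared according to FMASK.
 */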

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
            int type;
            if (is_int)
                    type = SVM_EVTINJ_TYPE_SOFT;
            else
                    type = SVM_EVTINJ_TYPE_EXEPT;
            event_inj = intno | type | SVM_EVTINJ_VALID;
            if (!rm && exeption_has_error_code(intno)) {
                    event_inj |= SVM_EVTINJ_VALID_ERR;
                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
            }
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
            uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
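
/* Illustrative note (not in the original source): the save area written
 * above lives in the upper part of SMRAM: with sm_state = smbase + 0x8000,
 * offsets 0x7e00..0x7fff correspond to physical smbase + 0xfe00..0xffff.
 * On SMM entry the CPU starts fetching at smbase + 0x8000, which is why
 * env->eip is set to 0x00008000 with the CS base equal to smbase.
 */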
1511

    
1512
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
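/* Note: on x86 both a zero divisor and a quotient that does not fit in
   the destination register raise the same divide-error fault, which is
   why the helpers below raise EXCP00_DIVZ in both cases. */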
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM with a zero immediate should raise the #DE divide-error
   exception; that case is not handled here. */
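/* AAM: AH := AL / base, AL := AL % base; AAD: AL := (AH * base + AL) & 0xff
   with AH cleared by the 16-bit write to AX. The immediate base is 10
   for the standard encodings; SF/ZF/PF follow the resulting AL. */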
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
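
/* AAA/AAS adjust AL after an unpacked-BCD add/subtract: when the low
   nibble exceeds 9 or AF is set, AL is corrected by 6 and AH absorbs
   the carry/borrow, with AF and CF updated to match. */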
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
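
/* INTO: software overflow check; raises the #OF trap (vector 4) when
   the overflow flag is set. */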
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
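
/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match store ECX:EBX and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF. The memory is written back in either case so
   the access always behaves like a write. */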
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
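
/* ENTER: allocate a stack frame; for nesting levels greater than 1 the
   frame pointers of the enclosing procedures are copied from the old
   frame before the current frame pointer (t1) is pushed. */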
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
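
/* LLDT/LTR: the selector must reference the GDT; in long mode system
   descriptors are 16 bytes wide, so the limit check uses
   entry_limit = 15 instead of 7. */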
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only valid in protected mode and not in VM86 mode; seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
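
/* Far calls through a call gate to a more privileged level switch to
   the stack given by the TSS and copy param_count words (dwords for a
   386 gate) of arguments from the old stack to the new one before
   pushing the return address. */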
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
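
/* IRET in real or VM86 mode pops EIP, CS and EFLAGS without any
   descriptor checks; which EFLAGS bits may change depends on the
   operand size and, in VM86 mode, on the IOPL rules. */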
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
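
/* When returning to an outer privilege level, data segment registers
   whose DPL is below the new CPL must be nulled so that the less
   privileged code cannot keep using them. */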
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
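
/* Common far-return path shared by LRET and IRET: shift selects the
   operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit) and addend is the
   extra byte count that LRET pops off the stack after the return
   address. */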
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
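
/* SYSENTER/SYSEXIT fast system calls: CS, SS, ESP and EIP are taken
   from the SYSENTER MSRs and the segment descriptors are synthesized
   as flat, so no descriptor-table accesses are needed. */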
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
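
/* MSR access: in user-mode emulation WRMSR and RDMSR are no-ops; the
   system-emulation versions below implement the MSRs QEMU knows about,
   and WRMSR silently ignores unknown ones (see the XXX note). */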
#if defined(CONFIG_USER_ONLY)
2995
void helper_wrmsr(void)
2996
{
2997
}
2998

    
2999
void helper_rdmsr(void)
3000
{
3001
}
3002
#else
3003
void helper_wrmsr(void)
3004
{
3005
    uint64_t val;
3006

    
3007
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3008

    
3009
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3010

    
3011
    switch((uint32_t)ECX) {
3012
    case MSR_IA32_SYSENTER_CS:
3013
        env->sysenter_cs = val & 0xffff;
3014
        break;
3015
    case MSR_IA32_SYSENTER_ESP:
3016
        env->sysenter_esp = val;
3017
        break;
3018
    case MSR_IA32_SYSENTER_EIP:
3019
        env->sysenter_eip = val;
3020
        break;
3021
    case MSR_IA32_APICBASE:
3022
        cpu_set_apic_base(env, val);
3023
        break;
3024
    case MSR_EFER:
3025
        {
3026
            uint64_t update_mask;
3027
            update_mask = 0;
3028
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3029
                update_mask |= MSR_EFER_SCE;
3030
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3031
                update_mask |= MSR_EFER_LME;
3032
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3033
                update_mask |= MSR_EFER_FFXSR;
3034
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3035
                update_mask |= MSR_EFER_NXE;
3036
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3037
                update_mask |= MSR_EFER_SVME;
3038
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3039
                update_mask |= MSR_EFER_FFXSR;
3040
            cpu_load_efer(env, (env->efer & ~update_mask) |
3041
                          (val & update_mask));
3042
        }
3043
        break;
3044
    case MSR_STAR:
3045
        env->star = val;
3046
        break;
3047
    case MSR_PAT:
3048
        env->pat = val;
3049
        break;
3050
    case MSR_VM_HSAVE_PA:
3051
        env->vm_hsave = val;
3052
        break;
3053
#ifdef TARGET_X86_64
3054
    case MSR_LSTAR:
3055
        env->lstar = val;
3056
        break;
3057
    case MSR_CSTAR:
3058
        env->cstar = val;
3059
        break;
3060
    case MSR_FMASK:
3061
        env->fmask = val;
3062
        break;
3063
    case MSR_FSBASE:
3064
        env->segs[R_FS].base = val;
3065
        break;
3066
    case MSR_GSBASE:
3067
        env->segs[R_GS].base = val;
3068
        break;
3069
    case MSR_KERNELGSBASE:
3070
        env->kernelgsbase = val;
3071
        break;
3072
#endif
3073
    case MSR_MTRRphysBase(0):
3074
    case MSR_MTRRphysBase(1):
3075
    case MSR_MTRRphysBase(2):
3076
    case MSR_MTRRphysBase(3):
3077
    case MSR_MTRRphysBase(4):
3078
    case MSR_MTRRphysBase(5):
3079
    case MSR_MTRRphysBase(6):
3080
    case MSR_MTRRphysBase(7):
3081
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3082
        break;
3083
    case MSR_MTRRphysMask(0):
3084
    case MSR_MTRRphysMask(1):
3085
    case MSR_MTRRphysMask(2):
3086
    case MSR_MTRRphysMask(3):
3087
    case MSR_MTRRphysMask(4):
3088
    case MSR_MTRRphysMask(5):
3089
    case MSR_MTRRphysMask(6):
3090
    case MSR_MTRRphysMask(7):
3091
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3092
        break;
3093
    case MSR_MTRRfix64K_00000:
3094
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3095
        break;
3096
    case MSR_MTRRfix16K_80000:
3097
    case MSR_MTRRfix16K_A0000:
3098
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3099
        break;
3100
    case MSR_MTRRfix4K_C0000:
3101
    case MSR_MTRRfix4K_C8000:
3102
    case MSR_MTRRfix4K_D0000:
3103
    case MSR_MTRRfix4K_D8000:
3104
    case MSR_MTRRfix4K_E0000:
3105
    case MSR_MTRRfix4K_E8000:
3106
    case MSR_MTRRfix4K_F0000:
3107
    case MSR_MTRRfix4K_F8000:
3108
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3109
        break;
3110
    case MSR_MTRRdefType:
3111
        env->mtrr_deftype = val;
3112
        break;
3113
    case MSR_MCG_STATUS:
3114
        env->mcg_status = val;
3115
        break;
3116
    case MSR_MCG_CTL:
3117
        if ((env->mcg_cap & MCG_CTL_P)
3118
            && (val == 0 || val == ~(uint64_t)0))
3119
            env->mcg_ctl = val;
3120
        break;
3121
    case MSR_TSC_AUX:
3122
        env->tsc_aux = val;
3123
        break;
3124
    default:
3125
        if ((uint32_t)ECX >= MSR_MC0_CTL
3126
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3127
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3128
            if ((offset & 0x3) != 0
3129
                || (val == 0 || val == ~(uint64_t)0))
3130
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}

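/* RDMSR: ECX selects the MSR; the 64-bit value is returned split across
   EDX:EAX (high:low halves), as on real hardware. */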
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

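/* LSL/LAR/VERR/VERW do not fault on invalid selectors: they report
   success or failure through ZF, which these helpers update via CC_SRC. */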
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

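/* x87 status word layout used throughout: C0=0x0100, C1=0x0200, C2=0x0400,
   C3=0x4000, TOP in bits 11-13 (mask 0x3800); hence the recurring 0x4500,
   0x4700 and 0x3800 masks below. */
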
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

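/* For the 16-bit FIST/FISTT variants below, results that do not fit are
   replaced by -32768 (0x8000, the x87 "integer indefinite" for a word
   store), matching hardware behaviour. */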
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

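/* floatx_compare() returns -1/0/1/2 for less/equal/greater/unordered;
   indexing with (ret + 1) maps that result onto the C0/C2/C3 condition
   bits (or CF/ZF/PF for the FCOMI variants). */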
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

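/* The RC field (FPUC bits 10-11) selects the rounding mode and the PC
   field (bits 8-9) the rounding precision (32/64/80 bits); both are
   propagated into the softfloat status below. */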
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

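/* FBLD/FBST operate on an 80-bit packed BCD number: bytes 0-8 hold 18
   decimal digits (two per byte, low digit in the low nibble) and bit 7
   of byte 9 is the sign. */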
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

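/* FPREM/FPREM1 follow the AMD-documented algorithm: when the exponent
   difference is below 53 the remainder is computed directly and the low
   three quotient bits are reported as (C0,C3,C1) = (q2,q1,q0); otherwise
   only a partial reduction is performed and C2 is set to signal that the
   instruction must be repeated. */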
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

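/* FSTENV/FLDENV use the 28-byte (32-bit) or 14-byte (16-bit) environment
   image: control word, status word, tag word, then instruction/operand
   pointers (stored as zero here). The tag word holds 2 bits per register. */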
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

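/* FXSAVE/FXRSTOR use the 512-byte image: FPU control/status/tag words at
   the start, the eight ST registers at offset 0x20 with a 16-byte stride,
   and the XMM registers at offset 0xa0. The tag word is kept in the
   compressed one-bit-per-register form, hence the "fptag ^ 0xff" below. */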
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

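/* 128/64 -> 64 division by simple restoring long division: each step
   shifts one dividend bit into the 128-bit remainder, subtracts the
   divisor when possible, and collects the quotient bits in a0. Overflow
   (a quotient wider than 64 bits) is detected up front when the high
   half of the dividend already reaches the divisor. */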
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

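/* 64-bit MUL/IMUL: the 128-bit product is returned in EDX:EAX. For MUL,
   CC_SRC holds the high half (non-zero means CF/OF set); for IMUL,
   CC_SRC records whether the high half differs from the sign extension
   of the low half. */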
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

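/* The VMCB packs segment attributes into 12 bits: flags bits 8-15 map to
   attrib bits 0-7 and flags bits 20-23 to attrib bits 8-11; svm_save_seg
   and svm_load_seg convert between that layout and the descriptor-cache
   flag layout used in CPUState. */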
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

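/* VMRUN: the host state is saved to the page set via MSR_VM_HSAVE_PA,
   then the guest state is loaded from the VMCB whose physical address
   is taken from (R/E)AX. */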
void helper_vmrun(int aflag, int next_eip_addend)
4919
{
4920
    target_ulong addr;
4921
    uint32_t event_inj;
4922
    uint32_t int_ctl;
4923

    
4924
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4925

    
4926
    if (aflag == 2)
4927
        addr = EAX;
4928
    else
4929
        addr = (uint32_t)EAX;
4930

    
4931
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4932

    
4933
    env->vm_vmcb = addr;
4934

    
4935
    /* save the current CPU state in the hsave page */
4936
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4937
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4938

    
4939
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4940
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4941

    
4942
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4943
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4944
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4945
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4946
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4947
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4948

    
4949
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4950
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4951

    
4952
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4953
                  &env->segs[R_ES]);
4954
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4955
                 &env->segs[R_CS]);
4956
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4957
                 &env->segs[R_SS]);
4958
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4959
                 &env->segs[R_DS]);
4960

    
4961
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4962
             EIP + next_eip_addend);
4963
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4964
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4965

    
4966
    /* load the interception bitmaps so we do not need to access the
4967
       vmcb in svm mode */
4968
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4969
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4970
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4971
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4972
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4973
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4974

    
4975
    /* enable intercepts */
4976
    env->hflags |= HF_SVMI_MASK;
4977

    
4978
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4979

    
4980
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4981
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4982

    
4983
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4984
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4985

    
4986
    /* clear exit_info_2 so we behave like the real hardware */
4987
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4988

    
4989
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4990
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4991
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4992
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4993
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4994
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4995
    if (int_ctl & V_INTR_MASKING_MASK) {
4996
        env->v_tpr = int_ctl & V_TPR_MASK;
4997
        env->hflags2 |= HF2_VINTR_MASK;
4998
        if (env->eflags & IF_MASK)
4999
            env->hflags2 |= HF2_HIF_MASK;
5000
    }
5001

    
5002
    cpu_load_efer(env, 
5003
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5004
    env->eflags = 0;
5005
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5006
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5007
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
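
/* VMLOAD/VMSAVE transfer the part of the CPU state that VMRUN and #VMEXIT
   leave alone: FS, GS, TR and LDTR, plus KernelGSbase, the STAR family
   and the SYSENTER MSRs.  rAX (truncated to 32 bits unless the effective
   address size is 64 bits, see aflag) holds the physical address of the
   VMCB to load from or save to. */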
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
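
/* STGI/CLGI toggle the global interrupt flag, tracked here in hflags2 as
   HF2_GIF_MASK; while GIF is clear, interrupts and other events are held
   pending instead of being delivered. */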
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
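
/* INVLPGA invalidates the guest TLB mapping for the virtual address in
   rAX; ECX carries the ASID, which this implementation does not use (see
   the XXX below). */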
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
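
/* Check whether the current instruction or event is intercepted.  The
   intercept bits were copied out of the VMCB by VMRUN; on a match the
   helper raises #VMEXIT through helper_vmexit(), which longjmps back to
   the cpu loop and therefore never returns. */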
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* The MSR permission map holds two bits per MSR (read, then
               write) in three 2 kB blocks covering the 0x0000xxxx,
               0xc000xxxx and 0xc001xxxx MSR ranges.  t1 is the byte
               offset into the map, t0 the bit offset of the read bit,
               and param selects read (0) or write (1). */
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
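
/* The I/O permission map has one bit per port.  param uses the IOIO
   exit-info layout, so bits 4..6 encode the access size in bytes; the
   derived mask covers every port touched by a multi-byte access, and a
   16-bit load is used because the mask may straddle a byte boundary. */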
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
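
/* helper_vmexit() stores the guest state back into the active VMCB,
   reloads the host state saved by VMRUN from vm_hsave and then calls
   cpu_loop_exit(), so it never returns to its caller. */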
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
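
/* fptags[i] == 1 marks st(i) as empty.  helper_enter_mmx above resets the
   stack top and tags all eight registers valid, as MMX instructions do;
   EMMS tags them all empty again. */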
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
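
/* ops_sse.h and helper_template.h are instantiated once per operand
   width, keyed off SHIFT: for ops_sse.h, SHIFT 0 builds the 64-bit MMX
   variants and SHIFT 1 the 128-bit SSE ones; for helper_template.h,
   SHIFT 0/1/2/3 build the byte/word/dword/qword flag helpers. */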
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
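/* helper_bsf() and helper_bsr() assume a non-zero source: the translator
   is expected to branch around the helper when the operand is zero,
   since the scan loops below would not terminate otherwise. */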
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
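
/* Shared by BSR and LZCNT.  With wordsize == 0 this returns the bit
   index of the most significant set bit (BSR semantics); with a positive
   wordsize it returns the number of leading zero bits within that word
   size, including the full wordsize for a zero source (LZCNT
   semantics). */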
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
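
/* When CC_OP is CC_OP_EFLAGS, the flags have already been materialized
   and live in CC_SRC, so computing them is a plain read. */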
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
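
/* Lazy condition code evaluation: CC_OP records which operation last set
   the flags and CC_SRC/CC_DST hold its operands, so EFLAGS is only
   materialized on demand.  helper_cc_compute_all() reconstructs all the
   arithmetic flags for that operation; helper_cc_compute_c() below
   computes just the carry, which is often the only flag needed.  Several
   cases there share one compute_c_* variant (e.g. compute_c_mull,
   compute_c_incl, compute_c_sarl) because the carry computation does not
   depend on the operand width. */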
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}