/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

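/* x86 PF is the even parity of the low 8 bits of a result:
   parity_table[v] holds CC_P exactly when the byte v contains an even
   number of set bits (e.g. parity_table[3] == CC_P). */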
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
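
/* RCL rotates through CF, so a w-bit rotate has period w + 1: the
   shift count is reduced modulo 17 for 16-bit and modulo 9 for 8-bit
   operands (e.g. rclw_table[18] == 1, equivalent to a count of 1). */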

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
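
/* These are the constants pushed on the FPU stack by the constant-load
   instructions FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T. */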

/* broken thread support: a single global lock serializes the
   helper_lock()/helper_unlock() sections used for LOCK-prefixed
   instructions */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
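
/* In a descriptor table entry, e1 is the low and e2 the high 32 bits:
   the 20-bit limit lives in e1[15:0] and e2[19:16], the base is split
   across e1[31:16], e2[7:0] and e2[31:24], and e2 also carries the
   type, DPL and flag bits used by the helpers below. */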

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
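
/* In a 32-bit TSS the {ESP, SS} pair for privilege level n starts at
   offset (n * 4 + 2) << 1 == 8 * n + 4; a 16-bit TSS packs {SP, SS}
   pairs starting at offset n * 4 + 2. */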

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
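/* Perform a hardware task switch: validate the new TSS, save the
   current CPU state into the old TSS, update the busy bits and the
   NT/back-link fields as required by the switch source, then load the
   new task's registers, segments and LDT. */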
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS (its format depends on
       the old task's type, not the new one's) */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16-bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if port I/O is allowed by the TSS I/O permission bitmap */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
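
/* Example: for a one-byte access to port 0x3f9, the bitmap byte is at
   offset 0x3f9 >> 3 = 0x7f from the bitmap base (itself read from TSS
   offset 0x66), and bit 0x3f9 & 7 = 1 of the 16-bit word loaded from
   there must be clear for the access to be allowed. */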

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
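
/* The stack segment's B flag selects a 32-bit (ESP) or 16-bit (SP)
   stack pointer; the mask returned above is applied to every stack
   access made through the PUSH/POP macros below. */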

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
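
/* These are the vectors that push an error code: #DF(8), #TS(10),
   #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17). */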

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit machines, the (ssp + sp) addition can overflow 32 bits.
 * This segment addition macro can be used to trim the value to 32 bits
 * whenever needed. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
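
/* In the 64-bit TSS, RSP0-RSP2 start at offset 4 and IST1-IST7 at
   offset 36, so index 8 * level + 4 covers both cases: levels 0-2
   select RSPn and levels 4-10 (i.e. ist + 3) select ISTn. */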

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
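
/* SYSCALL/SYSRET selectors come from the STAR MSR: STAR[47:32] is the
   kernel CS (with kernel SS = CS + 8) and STAR[63:48] the base
   selector used on return to user mode. LSTAR and CSTAR hold the
   64-bit and compatibility-mode entry points. */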

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

    
1186
#if !defined(CONFIG_USER_ONLY)
1187
static void handle_even_inj(int intno, int is_int, int error_code,
1188
                int is_hw, int rm)
1189
{
1190
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1191
    if (!(event_inj & SVM_EVTINJ_VALID)) {
1192
            int type;
1193
            if (is_int)
1194
                    type = SVM_EVTINJ_TYPE_SOFT;
1195
            else
1196
                    type = SVM_EVTINJ_TYPE_EXEPT;
1197
            event_inj = intno | type | SVM_EVTINJ_VALID;
1198
            if (!rm && exeption_has_error_code(intno)) {
1199
                    event_inj |= SVM_EVTINJ_VALID_ERR;
1200
                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1201
            }
1202
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1203
    }
1204
}
1205
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to a double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
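
/* Example: a contributory fault such as #GP raised while delivering a
   #PF turns into a #DF; any fault raised while delivering a #DF is a
   triple fault, modelled above as a system reset. */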

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
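
/* On SMI entry the CPU state is saved in the SMRAM state save area,
   addressed here relative to SMBASE + 0x8000, and execution restarts
   at SMBASE + 0x8000; RSM checks the saved revision ID to decide
   whether SMBASE relocation is available (bit 17). */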
1379

    
1380
void do_smm_enter(void)
1381
{
1382
    target_ulong sm_state;
1383
    SegmentCache *dt;
1384
    int i, offset;
1385

    
1386
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1387
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1388

    
1389
    env->hflags |= HF_SMM_MASK;
1390
    cpu_smm_update(env);
1391

    
1392
    sm_state = env->smbase + 0x8000;
1393

    
1394
#ifdef TARGET_X86_64
1395
    for(i = 0; i < 6; i++) {
1396
        dt = &env->segs[i];
1397
        offset = 0x7e00 + i * 16;
1398
        stw_phys(sm_state + offset, dt->selector);
1399
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1400
        stl_phys(sm_state + offset + 4, dt->limit);
1401
        stq_phys(sm_state + offset + 8, dt->base);
1402
    }
1403

    
1404
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1405
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1406

    
1407
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1408
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1409
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1410
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1411

    
1412
    stq_phys(sm_state + 0x7e88, env->idt.base);
1413
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1414

    
1415
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1416
    stq_phys(sm_state + 0x7e98, env->tr.base);
1417
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1418
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1419

    
1420
    stq_phys(sm_state + 0x7ed0, env->efer);
1421

    
1422
    stq_phys(sm_state + 0x7ff8, EAX);
1423
    stq_phys(sm_state + 0x7ff0, ECX);
1424
    stq_phys(sm_state + 0x7fe8, EDX);
1425
    stq_phys(sm_state + 0x7fe0, EBX);
1426
    stq_phys(sm_state + 0x7fd8, ESP);
1427
    stq_phys(sm_state + 0x7fd0, EBP);
1428
    stq_phys(sm_state + 0x7fc8, ESI);
1429
    stq_phys(sm_state + 0x7fc0, EDI);
1430
    for(i = 8; i < 16; i++)
1431
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1432
    stq_phys(sm_state + 0x7f78, env->eip);
1433
    stl_phys(sm_state + 0x7f70, compute_eflags());
1434
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1435
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1436

    
1437
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1438
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1439
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1440

    
1441
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1442
    stl_phys(sm_state + 0x7f00, env->smbase);
1443
#else
1444
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1445
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1446
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1447
    stl_phys(sm_state + 0x7ff0, env->eip);
1448
    stl_phys(sm_state + 0x7fec, EDI);
1449
    stl_phys(sm_state + 0x7fe8, ESI);
1450
    stl_phys(sm_state + 0x7fe4, EBP);
1451
    stl_phys(sm_state + 0x7fe0, ESP);
1452
    stl_phys(sm_state + 0x7fdc, EBX);
1453
    stl_phys(sm_state + 0x7fd8, EDX);
1454
    stl_phys(sm_state + 0x7fd4, ECX);
1455
    stl_phys(sm_state + 0x7fd0, EAX);
1456
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1457
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1458

    
1459
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1460
    stl_phys(sm_state + 0x7f64, env->tr.base);
1461
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1462
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1463

    
1464
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1465
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1466
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1467
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1468

    
1469
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1470
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1471

    
1472
    stl_phys(sm_state + 0x7f58, env->idt.base);
1473
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1474

    
1475
    for(i = 0; i < 6; i++) {
1476
        dt = &env->segs[i];
1477
        if (i < 3)
1478
            offset = 0x7f84 + i * 12;
1479
        else
1480
            offset = 0x7f2c + (i - 3) * 12;
1481
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1482
        stl_phys(sm_state + offset + 8, dt->base);
1483
        stl_phys(sm_state + offset + 4, dt->limit);
1484
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1485
    }
1486
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1487

    
1488
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1489
    stl_phys(sm_state + 0x7ef8, env->smbase);
1490
#endif
1491
    /* init SMM cpu state */
1492

    
1493
#ifdef TARGET_X86_64
1494
    cpu_load_efer(env, 0);
1495
#endif
1496
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1497
    env->eip = 0x00008000;
1498
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1499
                           0xffffffff, 0);
1500
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1501
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1502
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1503
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1504
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1505

    
1506
    cpu_x86_update_cr0(env,
1507
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1508
    cpu_x86_update_cr4(env, 0);
1509
    env->dr[7] = 0x00000400;
1510
    CC_OP = CC_OP_EFLAGS;
1511
}
1512

    
1513
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

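/* unsigned 8 bit divide: AX / t0, quotient in AL, remainder in AH.
   A zero divisor or a quotient that does not fit in 8 bits raises
   #DE. */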
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
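/* AAM: split AL into unpacked BCD digits, AH = AL / base and
   AL = AL % base (base is 10 for the plain opcode encoding); a base
   of 0 would raise #DE on real hardware, hence the XXX above. */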
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
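/* AAA: ASCII adjust after addition. If the low nibble of AL is
   greater than 9 or AF is set, propagate a decimal carry into AH
   and set AF and CF. */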
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
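/* DAA: decimal adjust AL after an addition of packed BCD operands;
   ZF, PF and SF are recomputed from the adjusted result. */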
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
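/* CMPXCHG8B: compare EDX:EAX with the 64 bit memory operand. On a
   match, store ECX:EBX and set ZF; otherwise load the old value
   into EDX:EAX and clear ZF. The store is performed in both cases
   so the memory access faults consistently. */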
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
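/* CMPXCHG16B: same protocol on a 16 byte operand, comparing RDX:RAX
   and storing RCX:RBX on a match; a misaligned operand raises #GP. */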
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
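/* TF single step: update the hardware breakpoint state in DR6, set
   DR6.BS and raise #DB */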
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
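/* ENTER with a non zero nesting level: copy the frame pointers saved
   by the enclosing ENTERs from the old frame onto the stack, then
   push the new frame pointer t1. */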
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
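/* LLDT: load the LDT register from a GDT entry. In long mode system
   descriptors are 16 bytes, hence the larger entry_limit check. */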
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
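/* LTR: load the task register. The descriptor must be an available
   TSS (type 1 or 9); it is marked busy once loaded. */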
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
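/* Direct calls push CS:(E)IP on the current stack. Calls through a
   call gate to more privileged code switch to the stack taken from
   the TSS and copy param_count parameters across to it. */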
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
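/* On a return to an outer privilege level, data and non conforming
   code segments whose DPL is below the new CPL must be nullified. */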
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode lret and iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
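/* IRET in protected mode: with NT set, return from the nested task
   through the back link selector stored in the current TSS;
   otherwise take the common protected mode return path. */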
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
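/* SYSENTER: fast entry to CPL 0. CS and SS are loaded as flat
   segments derived from the SYSENTER_CS MSR, and ESP/EIP come from
   the SYSENTER_ESP/EIP MSRs. */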
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
2826

    
2827
void helper_sysexit(int dflag)
2828
{
2829
    int cpl;
2830

    
2831
    cpl = env->hflags & HF_CPL_MASK;
2832
    if (env->sysenter_cs == 0 || cpl != 0) {
2833
        raise_exception_err(EXCP0D_GPF, 0);
2834
    }
2835
    cpu_x86_set_cpl(env, 3);
2836
#ifdef TARGET_X86_64
2837
    if (dflag == 2) {
2838
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2839
                               0, 0xffffffff,
2840
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2841
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2842
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2843
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2844
                               0, 0xffffffff,
2845
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2846
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2847
                               DESC_W_MASK | DESC_A_MASK);
2848
    } else
2849
#endif
2850
    {
2851
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2852
                               0, 0xffffffff,
2853
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2854
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2855
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2856
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2857
                               0, 0xffffffff,
2858
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2859
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2860
                               DESC_W_MASK | DESC_A_MASK);
2861
    }
2862
    ESP = ECX;
2863
    EIP = EDX;
2864
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

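/* writes to DR0-DR3 or DR7 must re-arm the corresponding hardware
   breakpoints */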
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
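/* RDTSC: read the time stamp counter into EDX:EAX; raises #GP when
   CR4.TSD is set and the access is not made from CPL 0 */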
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
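/* WRMSR: write the 64 bit value in EDX:EAX to the MSR selected by
   ECX. For EFER, only the bits backed by reported CPUID features may
   change. */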
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
3138

    
3139
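/* RDMSR: read the MSR selected by (uint32_t)ECX into EDX:EAX.  Unknown
   MSRs read back as 0 here instead of raising #GP (see the XXX in the
   default case). */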
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

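/* LSL: load the segment limit for a selector and report success in ZF.
   The flags are first materialized with helper_cc_compute_all() and
   written back through CC_SRC, with CC_Z set on success and cleared on
   failure. */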
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

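/* Merge exception bits into the FPU status word; if the exception is
   unmasked in the control word, also set the error-summary (ES) and
   busy (B) bits so that a following fwait can deliver it. */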
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

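/* FNSTSW: the status word carries the top-of-stack pointer in bits
   11-13, hence fpstt is masked to 3 bits and shifted into place. */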
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

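/* Propagate the x87 control word into the softfloat status: rounding
   control sits in bits 10-11 (RC_MASK) and, when FLOATX80 is available,
   the precision control field in bits 8-9 selects 32, 64 or 80-bit
   rounding precision. */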
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

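/* FBSTP: store ST0 as 10-byte packed BCD: bytes 0-8 hold 18 decimal
   digits, two per byte with the low-order digit in the low nibble, and
   bit 7 of byte 9 is the sign. */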
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);    /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

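/* FSTENV/FNSTENV: rebuild the 16-bit tag word from the per-register
   fptags array, using the 2-bit encodings 00 = valid, 01 = zero,
   10 = special (NaN, infinity, denormal) and 11 = empty. */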
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

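/* FXSAVE stores the FPU/SSE state in a 512-byte area.  Unlike FSAVE,
   the tag word is compressed to one bit per register (1 = valid), which
   is why fptag is xor'ed with 0xff before being stored. */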
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

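/* Without a native 80-bit long double, the 64-bit IEEE double used
   internally is converted to/from the 80-bit format: the 52-bit
   mantissa is shifted up by 11 with the explicit integer bit set, and
   the exponent is rebased from bias 1023 (EXPBIAS) to bias 16383. */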
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

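/* 128/64 -> 64 division for the DIV/IDIV helpers below: when the high
   half is non-zero, a bit-at-a-time shift-and-subtract loop leaves the
   quotient in *plow and the remainder in *phigh. */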
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

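/* BOUND: raise #BR (EXCP05_BOUND) when the signed array index is
   outside the [low, high] pair read from memory. */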
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

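/* The VMCB keeps segment attributes in a packed 12-bit form: descriptor
   flag bits 8-15 and 20-23 are folded into a 16-bit attrib field, and
   the two helpers below convert in both directions. */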
static inline void svm_save_seg(target_phys_addr_t addr,
4887
                                const SegmentCache *sc)
4888
{
4889
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
4890
             sc->selector);
4891
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
4892
             sc->base);
4893
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
4894
             sc->limit);
4895
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
4896
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4897
}
4898
                                
4899
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4900
{
4901
    unsigned int flags;
4902

    
4903
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4904
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4905
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4906
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4907
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4908
}
4909

    
4910
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4911
                                      CPUState *env, int seg_reg)
4912
{
4913
    SegmentCache sc1, *sc = &sc1;
4914
    svm_load_seg(addr, sc);
4915
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4916
                           sc->base, sc->limit, sc->flags);
4917
}
4918

    
4919
void helper_vmrun(int aflag, int next_eip_addend)
4920
{
4921
    target_ulong addr;
4922
    uint32_t event_inj;
4923
    uint32_t int_ctl;
4924

    
4925
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4926

    
4927
    if (aflag == 2)
4928
        addr = EAX;
4929
    else
4930
        addr = (uint32_t)EAX;
4931

    
4932
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4933

    
4934
    env->vm_vmcb = addr;
4935

    
4936
    /* save the current CPU state in the hsave page */
4937
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4938
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4939

    
4940
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4941
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4942

    
4943
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4944
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4945
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4946
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4947
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4948
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4949

    
4950
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4951
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4952

    
4953
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4954
                  &env->segs[R_ES]);
4955
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4956
                 &env->segs[R_CS]);
4957
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4958
                 &env->segs[R_SS]);
4959
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4960
                 &env->segs[R_DS]);
4961

    
4962
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4963
             EIP + next_eip_addend);
4964
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4965
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4966

    
4967
    /* load the interception bitmaps so we do not need to access the
4968
       vmcb in svm mode */
4969
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4970
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4971
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4972
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4973
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4974
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4975

    
4976
    /* enable intercepts */
4977
    env->hflags |= HF_SVMI_MASK;
4978

    
4979
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4980

    
4981
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4982
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4983

    
4984
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4985
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4986

    
4987
    /* clear exit_info_2 so we behave like the real hardware */
4988
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4989

    
4990
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4991
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4992
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4993
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4994
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4995
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4996
    if (int_ctl & V_INTR_MASKING_MASK) {
4997
        env->v_tpr = int_ctl & V_TPR_MASK;
4998
        env->hflags2 |= HF2_VINTR_MASK;
4999
        if (env->eflags & IF_MASK)
5000
            env->hflags2 |= HF2_HIF_MASK;
5001
    }
5002

    
5003
    cpu_load_efer(env, 
5004
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5005
    env->eflags = 0;
5006
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5007
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

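/* VMMCALL: after the intercept check (which #VMEXITs to a hypervisor
   that wants it), fall through to #UD since no hypervisor call
   interface is implemented behind this instruction. */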
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

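/* VMLOAD: load the additional, hidden guest state (FS/GS, TR, LDTR and
   the SYSENTER/SYSCALL MSRs) from the VMCB located at the physical
   address in rAX. */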
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

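/* VMSAVE: the mirror image of VMLOAD - store the same hidden state
   back into the VMCB addressed by rAX. */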
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

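/* STGI and CLGI (below) set and clear GIF, the global interrupt flag
   that gates all interrupt delivery while the hypervisor performs its
   world switch. */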
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

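/* SKINIT (secure init) is only intercept-checked here; the
   instruction itself is unimplemented and raises #UD. */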
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

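/* INVLPGA: invalidate the TLB mapping for the virtual address in rAX;
   the ASID is ignored here (see the XXX note below). */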
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

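/* Check whether the intercept bit for 'type' is set in the active VMCB
   and trigger a #VMEXIT if so.  For MSR accesses the MSR permission
   bitmap is consulted: each MSR owns two bits (read/write), and the
   0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff MSR ranges
   map to consecutive 2K regions of the bitmap. */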
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two bits per MSR: the bit offset is ECX * 2, so the
                   byte index must be (ECX * 2) / 8, matching the two
                   ranges below */
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

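/* Check the I/O permission bitmap for an intercepted IN/OUT: one bit
   per port, with 'param' carrying the access size and direction as
   encoded for the IOIO exit code. */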
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

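/* #VMEXIT: write the guest state and the exit reason back into the
   current VMCB, then reload the host state saved by vmrun from
   vm_hsave and resume execution in the host. */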
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
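/* MMX and x87 share the same register file: entering MMX mode resets
   the FP stack top and marks all eight tag-word entries valid (0),
   while EMMS below marks them all empty (1). */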
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

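/* The SSE and integer-op helper bodies are generated from templates:
   ops_sse.h is included once per vector width (SHIFT 0 = 64-bit MMX,
   SHIFT 1 = 128-bit SSE) and helper_template.h once per operand size
   (SHIFT 0..3 = byte, word, long, quad). */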
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
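/* BSF: index of the least significant set bit.  The translator is
   expected to guard the t0 == 0 case (setting ZF without calling the
   helper); a zero input here would never terminate the loop. */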
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

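/* With wordsize > 0 this implements LZCNT semantics (returning
   wordsize for a zero input); with wordsize == 0 it returns the bit
   index of the most significant set bit, which is what BSR needs.  A
   zero input with wordsize == 0 must be guarded by the caller. */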
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

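/* BSR is the wordsize == 0 case of lzcnt: the raw index of the top
   set bit. */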
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

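/* Lazy condition codes: instead of computing EFLAGS after every
   instruction, the translator records the last operation in CC_OP and
   its operands in CC_SRC/CC_DST.  The compute_*() functions expanded
   from the templates above reconstruct the flags from that record
   only when they are actually needed.  CC_OP_EFLAGS means CC_SRC
   already holds the final flags. */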
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

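/* Return all of O/S/Z/A/P/C for the recorded CC_OP.  The per-size
   variants (b/w/l/q) come from the helper_template.h expansions. */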
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

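/* Carry-only variant: the common case for ADC/SBB and shifts.  Where
   the carry does not depend on the operand size (mul, inc/dec, sar),
   one shared variant is reused for all sizes. */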
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}