/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
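
/* Illustrative note (not part of the original source): parity_table[x]
   yields CC_P exactly when x has an even number of 1 bits, matching the
   x86 PF definition over the low 8 bits of a result.  For example,
   0xa3 = 10100011b has four 1 bits, so parity_table[0xa3] == CC_P,
   while parity_table[0x01] == 0 (a single 1 bit, odd). */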

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
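
/* Illustrative note: RCL rotates through CF, so a 16-bit RCL cycles with
   period 17 and an 8-bit RCL with period 9.  These tables reduce the
   shift count accordingly, e.g. rclw_table[18] == 1 (18 mod 17) and
   rclb_table[10] == 1 (10 mod 9). */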

#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
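
/* Note (added for clarity): these are the extended-precision FPU load
   constants lg2 = log10(2) (FLDLG2), l2e = log2(e) (FLDL2E) and
   l2t = log2(10) (FLDL2T), encoded as floatx80 {sign/exponent,
   64-bit mantissa} pairs. */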

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
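
/* Worked example (illustrative only): a flat 4 GiB data descriptor
   0x00cf92000000ffff splits into e1 = 0x0000ffff (low word) and
   e2 = 0x00cf9200 (high word).  get_seg_base() assembles base = 0 from
   e1[31:16], e2[7:0] and e2[31:24]; get_seg_limit() yields 0xfffff, and
   since the G bit (DESC_G_MASK) is set the limit is scaled to
   (0xfffff << 12) | 0xfff = 0xffffffff. */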

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if it is a task gate, read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
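
/* Worked example (illustrative only): for a 1-byte access to port 0x3f8
   with an I/O bitmap base of io_offset = 0x68, the word is read at
   tr.base + 0x68 + (0x3f8 >> 3) = tr.base + 0xe7, shifted right by
   0x3f8 & 7 = 0, and masked with (1 << 1) - 1 = 1; the access is allowed
   only if that bitmap bit is 0. */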

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:  /* #DF */
    case 10: /* #TS */
    case 11: /* #NP */
    case 12: /* #SS */
    case 13: /* #GP */
    case 14: /* #PF */
    case 17: /* #AC */
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
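
/* Usage note: SET_ESP only updates the part of ESP selected by sp_mask,
   so with a 16-bit stack segment (sp_mask == 0xffff) the upper bits of
   ESP are preserved; e.g. SET_ESP(0xfffe, 0xffff) stores 0xfffe in SP
   and leaves ESP[31:16] unchanged. */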

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
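
/* Usage note: these macros deliberately operate on a local copy of the
   stack pointer; callers such as do_interrupt_protected() below only
   commit it with SET_ESP() after every push has succeeded, so a fault
   raised by stw_kernel()/stl_kernel() mid-sequence leaves ESP
   untouched. */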

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
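
/* Layout note: in the 64-bit TSS, RSPn lives at offset 4 + 8 * n and
   ISTn at offset 0x24 + 8 * (n - 1), which is why callers pass either
   the target DPL or ist + 3: get_rsp_from_tss(1 + 3) reads offset
   8 * 4 + 4 = 0x24, i.e. IST1. */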

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
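
/* Worked example (illustrative only, hypothetical vector contents): with
   the IVT at its reset base 0, INT 0x10 reads the 4-byte entry at
   address 0x40: a 16-bit offset followed by a 16-bit segment.  If that
   entry holds offset 0x2abc and segment 0xc000, execution resumes at
   c000:2abc after FLAGS, CS and IP have been pushed on the 16-bit
   stack. */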

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
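
/* Example of the classification above: a #GP (13, contributory) raised
   while delivering an earlier #NP (11, contributory) escalates to #DF
   with error code 0, while a fault raised on top of a pending #DF is
   treated by the block above as a triple fault and resets the machine. */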
1317

    
1318
/*
1319
 * Signal an interruption. It is executed in the main CPU loop.
1320
 * is_int is TRUE if coming from the int instruction. next_eip is the
1321
 * EIP value AFTER the interrupt instruction. It is only relevant if
1322
 * is_int is TRUE.
1323
 */
1324
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1325
                                          int next_eip_addend)
1326
{
1327
    if (!is_int) {
1328
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1329
        intno = check_exception(intno, &error_code);
1330
    } else {
1331
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1332
    }
1333

    
1334
    env->exception_index = intno;
1335
    env->error_code = error_code;
1336
    env->exception_is_int = is_int;
1337
    env->exception_next_eip = env->eip + next_eip_addend;
1338
    cpu_loop_exit();
1339
}
1340

    
1341
/* shortcuts to generate exceptions */
1342

    
1343
void raise_exception_err(int exception_index, int error_code)
1344
{
1345
    raise_interrupt(exception_index, 0, error_code, 0);
1346
}
1347

    
1348
void raise_exception(int exception_index)
1349
{
1350
    raise_interrupt(exception_index, 0, 0, 0);
1351
}
1352

    
1353
void raise_exception_env(int exception_index, CPUState *nenv)
1354
{
1355
    env = nenv;
1356
    raise_exception(exception_index);
1357
}
1358
/* SMM support */
1359

    
1360
#if defined(CONFIG_USER_ONLY)
1361

    
1362
void do_smm_enter(void)
1363
{
1364
}
1365

    
1366
void helper_rsm(void)
1367
{
1368
}
1369

    
1370
#else
1371

    
1372
#ifdef TARGET_X86_64
1373
#define SMM_REVISION_ID 0x00020064
1374
#else
1375
#define SMM_REVISION_ID 0x00020000
1376
#endif
1377

    
1378
void do_smm_enter(void)
1379
{
1380
    target_ulong sm_state;
1381
    SegmentCache *dt;
1382
    int i, offset;
1383

    
1384
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1385
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1386

    
1387
    env->hflags |= HF_SMM_MASK;
1388
    cpu_smm_update(env);
1389

    
1390
    sm_state = env->smbase + 0x8000;
1391

    
1392
#ifdef TARGET_X86_64
1393
    for(i = 0; i < 6; i++) {
1394
        dt = &env->segs[i];
1395
        offset = 0x7e00 + i * 16;
1396
        stw_phys(sm_state + offset, dt->selector);
1397
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1398
        stl_phys(sm_state + offset + 4, dt->limit);
1399
        stq_phys(sm_state + offset + 8, dt->base);
1400
    }
1401

    
1402
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1403
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1404

    
1405
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1406
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1407
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1408
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1409

    
1410
    stq_phys(sm_state + 0x7e88, env->idt.base);
1411
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1412

    
1413
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1414
    stq_phys(sm_state + 0x7e98, env->tr.base);
1415
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1416
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1417

    
1418
    stq_phys(sm_state + 0x7ed0, env->efer);
1419

    
1420
    stq_phys(sm_state + 0x7ff8, EAX);
1421
    stq_phys(sm_state + 0x7ff0, ECX);
1422
    stq_phys(sm_state + 0x7fe8, EDX);
1423
    stq_phys(sm_state + 0x7fe0, EBX);
1424
    stq_phys(sm_state + 0x7fd8, ESP);
1425
    stq_phys(sm_state + 0x7fd0, EBP);
1426
    stq_phys(sm_state + 0x7fc8, ESI);
1427
    stq_phys(sm_state + 0x7fc0, EDI);
1428
    for(i = 8; i < 16; i++)
1429
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1430
    stq_phys(sm_state + 0x7f78, env->eip);
1431
    stl_phys(sm_state + 0x7f70, compute_eflags());
1432
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1433
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1434

    
1435
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1436
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1437
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1438

    
1439
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1440
    stl_phys(sm_state + 0x7f00, env->smbase);
1441
#else
1442
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1443
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1444
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1445
    stl_phys(sm_state + 0x7ff0, env->eip);
1446
    stl_phys(sm_state + 0x7fec, EDI);
1447
    stl_phys(sm_state + 0x7fe8, ESI);
1448
    stl_phys(sm_state + 0x7fe4, EBP);
1449
    stl_phys(sm_state + 0x7fe0, ESP);
1450
    stl_phys(sm_state + 0x7fdc, EBX);
1451
    stl_phys(sm_state + 0x7fd8, EDX);
1452
    stl_phys(sm_state + 0x7fd4, ECX);
1453
    stl_phys(sm_state + 0x7fd0, EAX);
1454
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1455
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1456

    
1457
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1458
    stl_phys(sm_state + 0x7f64, env->tr.base);
1459
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1460
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1461

    
1462
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1463
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1464
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1465
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1466

    
1467
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1468
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1469

    
1470
    stl_phys(sm_state + 0x7f58, env->idt.base);
1471
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1472

    
1473
    for(i = 0; i < 6; i++) {
1474
        dt = &env->segs[i];
1475
        if (i < 3)
1476
            offset = 0x7f84 + i * 12;
1477
        else
1478
            offset = 0x7f2c + (i - 3) * 12;
1479
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1480
        stl_phys(sm_state + offset + 8, dt->base);
1481
        stl_phys(sm_state + offset + 4, dt->limit);
1482
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1483
    }
1484
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1485

    
1486
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1487
    stl_phys(sm_state + 0x7ef8, env->smbase);
1488
#endif
1489
    /* init SMM cpu state */
1490

    
1491
#ifdef TARGET_X86_64
1492
    cpu_load_efer(env, 0);
1493
#endif
1494
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1495
    env->eip = 0x00008000;
1496
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1497
                           0xffffffff, 0);
1498
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1499
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1500
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1501
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1502
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1503

    
1504
    cpu_x86_update_cr0(env,
1505
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1506
    cpu_x86_update_cr4(env, 0);
1507
    env->dr[7] = 0x00000400;
1508
    CC_OP = CC_OP_EFLAGS;
1509
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
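
/* Note: bit 17 (0x20000) of the state-save revision ID is the conventional
   "SMBASE relocation supported" flag; only when it is set may the SMM
   handler's new SMBASE value be adopted on RSM, as done above. */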

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
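
/* Unsigned divide: the dividend is double-width (AX, DX:AX or EDX:EAX) and
   quotient/remainder are written back to its low/high half.  Hardware
   raises #DE both for a zero divisor and for a quotient that overflows the
   destination, hence the extra range check after the division.  For
   example, AX = 100 divided by 9 leaves AL = 11, AH = 1, while AX = 0x0400
   divided by 4 faults because the quotient 0x100 does not fit in AL. */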
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
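
/* Signed variants: C division already truncates toward zero, matching
   IDIV, and the remainder takes the sign of the dividend (-7 IDIV 2 gives
   quotient -3, remainder -1).  The q != (intN_t)q test catches quotient
   overflow, e.g. INT32_MIN IDIV -1 in the 32-bit case. */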

/* bcd */
void helper_aam(int base)
{
    int al, ah;

    /* AAM with a zero immediate raises #DE on real hardware; checking here
       also avoids a host division by zero */
    if (base == 0)
        raise_exception(EXCP00_DIVZ);
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
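
/* Worked example: AAM splits AL into base-10 digits and AAD recombines
   them.  With AL = 79 (0x4f), AAM leaves AH = 7, AL = 9; AAD then restores
   AL = 7 * 10 + 9 = 79.  Both take the base as an immediate operand, so
   bases other than 10 work as well. */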

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
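
/* Worked example for DAA: 0x15 + 0x27 = 0x3c, whose low nibble 0xc is not
   a valid BCD digit; adding 6 gives 0x42, the packed-BCD result of
   15 + 27 = 42.  DAS applies the symmetric correction after SUB. */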

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
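
/* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand; on a match it
   stores ECX:EBX and sets ZF, otherwise it loads the old value into
   EDX:EAX and clears ZF.  The unconditional store in the mismatch path
   mirrors the write cycle that real hardware performs. */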

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
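
/* Nesting-level support for ENTER: for level n the loops above copy the
   n - 1 enclosing frame pointers from the old frame, then push t1, the
   new frame-pointer value.  Note that the helpers only perform the
   stores: ESP and EBP themselves, the save of the old EBP and the
   allocation of the locals area are handled by the generated code. */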

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
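
/* LLDT and LTR accept only GDT selectors (bit 2 clear) naming a system
   descriptor: type 2 for an LDT, type 1 (16-bit) or 9 (32-bit available
   TSS) for TR.  In long mode system descriptors grow to 16 bytes, hence
   entry_limit = 15, and LTR additionally requires the type field of the
   upper half to be zero.  LTR finally marks the TSS busy (type 9 -> 11). */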

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
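
/* Summary of the checks above: SS must name a writable data segment with
   RPL == DPL == CPL; the other data-segment registers accept readable
   segments with max(CPL, RPL) <= DPL unless the target is conforming code.
   A failing check pushes the offending selector as the error code of the
   #GP, #SS or #NP fault. */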

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
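
/* For a call gate to inner privilege, the new SS:ESP comes from the TSS
   and the gate's 5-bit parameter count says how many stack words to copy
   from the caller's stack before the old SS:ESP and CS:EIP are pushed;
   that is exactly what the param_count loops above implement. */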

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
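
/* Stack image consumed above, lowest address first: EIP, CS and, for IRET,
   EFLAGS.  When the return crosses to an outer privilege level, ESP and SS
   follow; an IRET back to vm86 additionally pops ES, DS, FS and GS. */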

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
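
/* SYSENTER/SYSEXIT derive every selector from IA32_SYSENTER_CS: kernel CS
   at the MSR value and kernel SS at +8; SYSEXIT returns through user CS/SS
   at +16/+24 (legacy) or +32/+40 (64-bit), always with flat 4 GB
   segments. */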

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    /* RDPMC is permitted outside ring 0 only when CR4.PCE is set */
    if (((env->cr[4] & CR4_PCE_MASK) == 0) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            /* MCi_STATUS (offset & 3 == 1) only accepts 0 or all-ones;
               the other per-bank registers are freely writable */
            if ((offset & 0x3) != 1
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
3136

    
3137
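/* RDMSR returns the 64-bit MSR value split across EDX:EAX.  Note that
   unimplemented MSRs currently read as 0 and are silently ignored on
   write (see the XXX markers), instead of raising #GP as real hardware
   would. */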
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

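/* LSL, LAR, VERR and VERW report success through ZF in the cached
   flags: ZF is set when the selector passes the checks, and cleared on
   any failure (no exception is raised). */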
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

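/* Note: several of the transcendental helpers below (f2xm1, fyl2x,
   fptan, fpatan, fsin, fcos, ...) compute via the host 'double' type
   and libm, so they only deliver 53 bits of precision rather than the
   full 64-bit significand of a real 80-bit x87 operation. */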
static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    floatx80 tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

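/* The FPU control word holds the precision control in bits 8-9
   (0 = single, 2 = double, 3 = extended) and the rounding control in
   bits 10-11; update_fp_status() translates both into softfloat
   settings (32/64/80-bit formats and a rounding mode). */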
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

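/* FBLD/FBST operate on the x87 packed BCD format: bytes 0-8 hold 18
   BCD digits, two per byte with the least significant byte first, and
   bit 7 of byte 9 carries the sign. */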
void helper_fbld_ST0(target_ulong ptr)
{
    floatx80 tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx80(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        /* floatx80_chs() returns its result; it must be assigned back */
        tmp = floatx80_chs(tmp);
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    double val = floatx80_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_floatx80(val);
}

void helper_fyl2x(void)
{
    double fptemp = floatx80_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);    /* log2(ST) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /* DP exponent bias */
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}

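/* fprem and fprem1 below compute the partial remainder through host
   doubles.  When the exponent difference is too large, only a partial
   reduction is done and C2 is set so the guest will iterate; otherwise
   the three low quotient bits are reported in C0, C3 and C1. */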
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
       ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

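/* FXSAVE/FXRSTOR use the 512-byte extended save area: FCW/FSW/FTW and
   the instruction/data pointers in the first 0x20 bytes (MXCSR at
   offset 0x18), the eight x87/MMX registers in 16-byte slots from
   offset 0x20, and the XMM registers from offset 0xa0. */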
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

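/* 128-bit helper arithmetic for the 64-bit MUL/IMUL/DIV/IDIV helpers
   below; values are represented as (low, high) pairs of uint64_t. */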
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

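/* The multiply helpers load CC_SRC with the high half of the result
   (MUL) or with the overflow predicate itself (IMUL: high half !=
   sign extension of the low half), from which the condition-code
   machinery derives CF/OF. */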
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

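/* BOUND: the operand points to a signed [low, high] pair in memory;
   #BR (exception 5) is raised when the index lies outside it. */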
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

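/* The VMCB stores segment attributes in a packed 12-bit form: bits
   8-15 of the QEMU segment flags map to attrib bits 0-7 and flags bits
   20-23 to attrib bits 8-11.  svm_save_seg()/svm_load_seg() convert
   between the two layouts. */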
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;