target-i386 / op_helper.c @ be1c17c7
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

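/* The x86 parity flag (PF) reflects only the low byte of a result: it is
   set when that byte has an even number of 1 bits. This table precomputes
   CC_P for each possible byte value. */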
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

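/* RCL/RCR rotate through the carry flag, so a rotate of a w-bit operand has
   period w + 1. The tables below reduce a masked 5-bit shift count modulo 17
   for 16-bit operands and modulo 9 for 8-bit operands. */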
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

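/* FPU constants loaded by the FLD<const> instructions, in table order:
   FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T */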
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

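/* Condition codes are evaluated lazily: helper_cc_compute_all() derives the
   arithmetic flags from the current CC_OP, after which the direction flag
   and the remaining stored eflags bits are merged in. */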
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

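/* A segment descriptor is two 32-bit words: e1 holds limit[15:0] and
   base[15:0]; e2 holds base[23:16], the attribute bits, limit[19:16] and
   base[31:24]. When the granularity bit (DESC_G_MASK) is set, the limit is
   counted in 4 KiB units. */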
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* non-readable code segments are not allowed */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* for data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

276
static void switch_tss(int tss_selector,
277
                       uint32_t e1, uint32_t e2, int source,
278
                       uint32_t next_eip)
279
{
280
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281
    target_ulong tss_base;
282
    uint32_t new_regs[8], new_segs[6];
283
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284
    uint32_t old_eflags, eflags_mask;
285
    SegmentCache *dt;
286
    int index;
287
    target_ulong ptr;
288

    
289
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
291

    
292
    /* if task gate, we read the TSS segment and we load it */
293
    if (type == 5) {
294
        if (!(e2 & DESC_P_MASK))
295
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296
        tss_selector = e1 >> 16;
297
        if (tss_selector & 4)
298
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299
        if (load_segment(&e1, &e2, tss_selector) != 0)
300
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301
        if (e2 & DESC_S_MASK)
302
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
304
        if ((type & 7) != 1)
305
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306
    }
307

    
308
    if (!(e2 & DESC_P_MASK))
309
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
310

    
311
    if (type & 8)
312
        tss_limit_max = 103;
313
    else
314
        tss_limit_max = 43;
315
    tss_limit = get_seg_limit(e1, e2);
316
    tss_base = get_seg_base(e1, e2);
317
    if ((tss_selector & 4) != 0 ||
318
        tss_limit < tss_limit_max)
319
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
321
    if (old_type & 8)
322
        old_tss_limit_max = 103;
323
    else
324
        old_tss_limit_max = 43;
325

    
326
    /* read all the registers from the new TSS */
327
    if (type & 8) {
328
        /* 32 bit */
329
        new_cr3 = ldl_kernel(tss_base + 0x1c);
330
        new_eip = ldl_kernel(tss_base + 0x20);
331
        new_eflags = ldl_kernel(tss_base + 0x24);
332
        for(i = 0; i < 8; i++)
333
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334
        for(i = 0; i < 6; i++)
335
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336
        new_ldt = lduw_kernel(tss_base + 0x60);
337
        new_trap = ldl_kernel(tss_base + 0x64);
338
    } else {
339
        /* 16 bit */
340
        new_cr3 = 0;
341
        new_eip = lduw_kernel(tss_base + 0x0e);
342
        new_eflags = lduw_kernel(tss_base + 0x10);
343
        for(i = 0; i < 8; i++)
344
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345
        for(i = 0; i < 4; i++)
346
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347
        new_ldt = lduw_kernel(tss_base + 0x2a);
348
        new_segs[R_FS] = 0;
349
        new_segs[R_GS] = 0;
350
        new_trap = 0;
351
    }
352
    /* XXX: avoid a compiler warning, see
353
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
354
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
355
    (void)new_trap;
356

    
357
    /* NOTE: we must avoid memory exceptions during the task switch,
358
       so we make dummy accesses before */
359
    /* XXX: it can still fail in some cases, so a bigger hack is
360
       necessary to valid the TLB after having done the accesses */
361

    
362
    v1 = ldub_kernel(env->tr.base);
363
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
364
    stb_kernel(env->tr.base, v1);
365
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
366

    
367
    /* clear busy bit (it is restartable) */
368
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
369
        target_ulong ptr;
370
        uint32_t e2;
371
        ptr = env->gdt.base + (env->tr.selector & ~7);
372
        e2 = ldl_kernel(ptr + 4);
373
        e2 &= ~DESC_TSS_BUSY_MASK;
374
        stl_kernel(ptr + 4, e2);
375
    }
376
    old_eflags = compute_eflags();
377
    if (source == SWITCH_TSS_IRET)
378
        old_eflags &= ~NT_MASK;
379

    
380
    /* save the current state in the old TSS */
381
    if (type & 8) {
382
        /* 32 bit */
383
        stl_kernel(env->tr.base + 0x20, next_eip);
384
        stl_kernel(env->tr.base + 0x24, old_eflags);
385
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
386
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
387
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
388
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
389
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
390
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
391
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
392
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
393
        for(i = 0; i < 6; i++)
394
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
395
    } else {
396
        /* 16 bit */
397
        stw_kernel(env->tr.base + 0x0e, next_eip);
398
        stw_kernel(env->tr.base + 0x10, old_eflags);
399
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
400
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
401
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
402
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
403
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
404
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
405
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
406
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
407
        for(i = 0; i < 4; i++)
408
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
409
    }
410

    
411
    /* now if an exception occurs, it will occurs in the next task
412
       context */
413

    
414
    if (source == SWITCH_TSS_CALL) {
415
        stw_kernel(tss_base, env->tr.selector);
416
        new_eflags |= NT_MASK;
417
    }
418

    
419
    /* set busy bit */
420
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
421
        target_ulong ptr;
422
        uint32_t e2;
423
        ptr = env->gdt.base + (tss_selector & ~7);
424
        e2 = ldl_kernel(ptr + 4);
425
        e2 |= DESC_TSS_BUSY_MASK;
426
        stl_kernel(ptr + 4, e2);
427
    }
428

    
429
    /* set the new CPU state */
430
    /* from this point, any exception which occurs can give problems */
431
    env->cr[0] |= CR0_TS_MASK;
432
    env->hflags |= HF_TS_MASK;
433
    env->tr.selector = tss_selector;
434
    env->tr.base = tss_base;
435
    env->tr.limit = tss_limit;
436
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
437

    
438
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
439
        cpu_x86_update_cr3(env, new_cr3);
440
    }
441

    
442
    /* load all registers without an exception, then reload them with
443
       possible exception */
444
    env->eip = new_eip;
445
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
446
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
447
    if (!(type & 8))
448
        eflags_mask &= 0xffff;
449
    load_eflags(new_eflags, eflags_mask);
450
    /* XXX: what to do in 16 bit case ? */
451
    EAX = new_regs[0];
452
    ECX = new_regs[1];
453
    EDX = new_regs[2];
454
    EBX = new_regs[3];
455
    ESP = new_regs[4];
456
    EBP = new_regs[5];
457
    ESI = new_regs[6];
458
    EDI = new_regs[7];
459
    if (new_eflags & VM_MASK) {
460
        for(i = 0; i < 6; i++)
461
            load_seg_vm(i, new_segs[i]);
462
        /* in vm86, CPL is always 3 */
463
        cpu_x86_set_cpl(env, 3);
464
    } else {
465
        /* CPL is set the RPL of CS */
466
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
467
        /* first just selectors as the rest may trigger exceptions */
468
        for(i = 0; i < 6; i++)
469
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
470
    }
471

    
472
    env->ldt.selector = new_ldt & ~4;
473
    env->ldt.base = 0;
474
    env->ldt.limit = 0;
475
    env->ldt.flags = 0;
476

    
477
    /* load the LDT */
478
    if (new_ldt & 4)
479
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
480

    
481
    if ((new_ldt & 0xfffc) != 0) {
482
        dt = &env->gdt;
483
        index = new_ldt & ~7;
484
        if ((index + 7) > dt->limit)
485
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
486
        ptr = dt->base + index;
487
        e1 = ldl_kernel(ptr);
488
        e2 = ldl_kernel(ptr + 4);
489
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
490
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
491
        if (!(e2 & DESC_P_MASK))
492
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
493
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
494
    }
495

    
496
    /* load the segments */
497
    if (!(new_eflags & VM_MASK)) {
498
        tss_load_seg(R_CS, new_segs[R_CS]);
499
        tss_load_seg(R_SS, new_segs[R_SS]);
500
        tss_load_seg(R_ES, new_segs[R_ES]);
501
        tss_load_seg(R_DS, new_segs[R_DS]);
502
        tss_load_seg(R_FS, new_segs[R_FS]);
503
        tss_load_seg(R_GS, new_segs[R_GS]);
504
    }
505

    
506
    /* check that EIP is in the CS segment limits */
507
    if (new_eip > env->segs[R_CS].limit) {
508
        /* XXX: different exception if CALL ? */
509
        raise_exception_err(EXCP0D_GPF, 0);
510
    }
511

    
512
#ifndef CONFIG_USER_ONLY
513
    /* reset local breakpoints */
514
    if (env->dr[7] & 0x55) {
515
        for (i = 0; i < 4; i++) {
516
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
517
                hw_breakpoint_remove(env, i);
518
        }
519
        env->dr[7] &= ~0x55;
520
    }
521
#endif
522
}
523

    
524
/* check if Port I/O is allowed in TSS */
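/* The 16-bit word at offset 0x66 of the 32-bit TSS gives the base of the
   I/O permission bitmap, in which each bit covers one port; an access of
   `size` bytes at `addr` is allowed only if every covered bit is clear. */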
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
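/* On x86_64 a plain mask-and-merge would corrupt the upper 32 bits of RSP,
   so the three possible stack sizes are handled as separate cases above. */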

/* on 64-bit machines, this can overflow, so this segment addition macro
 * can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
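/* Dispatch through the IDT: a task gate (type 5) triggers a hardware task
   switch, while 286/386 interrupt and trap gates (types 6, 7, 14, 15) push
   a return frame, switching to an inner-privilege stack taken from the TSS
   when the handler runs at a more privileged level. */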
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag (a trap gate does not) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
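/* In long mode the 64-bit TSS holds RSP0-RSP2 for privilege levels 0-2
   starting at offset 4, followed by the IST entries; a non-zero IST field
   in the IDT descriptor forces a switch to that stack. */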
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag (a trap gate does not) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
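/* SYSCALL: the new CS selector comes from STAR[47:32] (SS is implicitly
   CS + 8). In long mode the entry point is LSTAR for 64-bit callers or
   CSTAR for compatibility mode, and SFMASK (env->fmask) selects which
   RFLAGS bits to clear; legacy mode jumps to the 32-bit entry point in
   STAR[31:0]. */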
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
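/* SYSRET: returns to CPL 3 using STAR[63:48]; a 64-bit return (dflag == 2)
   uses that selector + 16 for CS, a 32-bit return uses it directly, and SS
   is always the selector + 8. */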
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
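/* Real mode fetches the 4-byte IVT entry at IDT base + intno * 4 (offset,
   then segment) and pushes only FLAGS, CS and IP. */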
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with a suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
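/* While running as an SVM guest (HF_SVMI_MASK), record the event being
   delivered in the VMCB EVENTINJ field, so the pending event remains
   visible if a #VMEXIT is taken during delivery. */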
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

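/* Entering SMM saves the CPU state to the state save area at
   SMBASE + 0x8000 (the 64-bit and 32-bit save maps differ, hence the two
   branches below), then starts execution in a flat, real-mode-like
   environment with CS based at SMBASE and EIP = 0x8000. */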
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
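/* RSM restores the state saved by do_smm_enter(); the revision ID at
   offset 0x7efc is checked for the SMBASE relocation bit (0x20000) before
   a new SMBASE value is accepted. */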
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

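/* Note: on real hardware DIV/IDIV raise #DE both for a zero divisor and
   for a quotient that does not fit in the destination register, which
   is why the overflow checks below reuse EXCP00_DIVZ. */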
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
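/* AAM divides AL by the immediate base (10 for the plain opcode) and
   leaves the quotient in AH and the remainder in AL; a zero base would
   raise #DE on hardware, which is not modelled here (see XXX above). */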
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

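/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX and set ZF, otherwise clear ZF and load the old value
   into EDX:EAX.  The memory operand is written back even on mismatch so
   that write access faults are raised exactly as on hardware, where the
   instruction always performs a store. */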
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

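/* ENTER with a non-zero nesting level copies 'level - 1' saved frame
   pointers from the old frame before pushing the frame temporary t1,
   which is why the loops below start by decrementing 'level'; the
   initial esp adjustment skips the slot used for the old EBP that the
   translated code has already pushed. */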
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

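/* LLDT loads the LDT register from a GDT descriptor: the selector must
   reference the GDT (TI bit clear) and an LDT system descriptor
   (type 2); in long mode system descriptors are 16 bytes, hence the
   larger entry_limit. */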
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

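/* LTR requires an available TSS descriptor (type 1 or 9) and then marks
   it busy by writing DESC_TSS_BUSY_MASK back into the GDT, so a second
   LTR with the same selector would fault with #GP on the type check. */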
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

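/* IRET with EFLAGS.NT set returns from a nested task: the link field at
   offset 0 of the current TSS names the previous task, whose descriptor
   must still be marked busy (type 3 for a 286 TSS, 11 for a 386 TSS;
   the 0x17 mask below collapses both busy types, and the S bit, so the
   single check against 3 covers them). */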
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

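/* SYSENTER and SYSEXIT load CS and SS at fixed offsets from the
   IA32_SYSENTER_CS MSR with flat 4 GB limits and no descriptor table
   lookup, which is what makes these instructions fast. */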
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

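/* RDTSC faults with #GP outside ring 0 when CR4.TSD is set; when an SVM
   guest is running, env->tsc_offset holds the VMCB TSC_OFFSET bias that
   is added to the host counter below. */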
void helper_rdtsc(void)
2969
{
2970
    uint64_t val;
2971

    
2972
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2973
        raise_exception(EXCP0D_GPF);
2974
    }
2975
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2976

    
2977
    val = cpu_get_tsc(env) + env->tsc_offset;
2978
    EAX = (uint32_t)(val);
2979
    EDX = (uint32_t)(val >> 32);
2980
}
2981

    
2982
void helper_rdtscp(void)
2983
{
2984
    helper_rdtsc();
2985
    ECX = (uint32_t)(env->tsc_aux);
2986
}
2987

    
2988
void helper_rdpmc(void)
2989
{
2990
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2991
        raise_exception(EXCP0D_GPF);
2992
    }
2993
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2994
    
2995
    /* currently unimplemented */
2996
    raise_exception_err(EXCP06_ILLOP, 0);
2997
}
2998

    
2999
#if defined(CONFIG_USER_ONLY)
3000
void helper_wrmsr(void)
3001
{
3002
}
3003

    
3004
void helper_rdmsr(void)
3005
{
3006
}
3007
#else
3008
void helper_wrmsr(void)
3009
{
3010
    uint64_t val;
3011

    
3012
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3013

    
3014
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3015

    
3016
    switch((uint32_t)ECX) {
3017
    case MSR_IA32_SYSENTER_CS:
3018
        env->sysenter_cs = val & 0xffff;
3019
        break;
3020
    case MSR_IA32_SYSENTER_ESP:
3021
        env->sysenter_esp = val;
3022
        break;
3023
    case MSR_IA32_SYSENTER_EIP:
3024
        env->sysenter_eip = val;
3025
        break;
3026
    case MSR_IA32_APICBASE:
3027
        cpu_set_apic_base(env->apic_state, val);
3028
        break;
3029
    case MSR_EFER:
3030
        {
3031
            uint64_t update_mask;
3032
            update_mask = 0;
3033
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3034
                update_mask |= MSR_EFER_SCE;
3035
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3036
                update_mask |= MSR_EFER_LME;
3037
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3038
                update_mask |= MSR_EFER_FFXSR;
3039
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3040
                update_mask |= MSR_EFER_NXE;
3041
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3042
                update_mask |= MSR_EFER_SVME;
3043
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3044
                update_mask |= MSR_EFER_FFXSR;
3045
            cpu_load_efer(env, (env->efer & ~update_mask) |
3046
                          (val & update_mask));
3047
        }
3048
        break;
3049
    case MSR_STAR:
3050
        env->star = val;
3051
        break;
3052
    case MSR_PAT:
3053
        env->pat = val;
3054
        break;
3055
    case MSR_VM_HSAVE_PA:
3056
        env->vm_hsave = val;
3057
        break;
3058
#ifdef TARGET_X86_64
3059
    case MSR_LSTAR:
3060
        env->lstar = val;
3061
        break;
3062
    case MSR_CSTAR:
3063
        env->cstar = val;
3064
        break;
3065
    case MSR_FMASK:
3066
        env->fmask = val;
3067
        break;
3068
    case MSR_FSBASE:
3069
        env->segs[R_FS].base = val;
3070
        break;
3071
    case MSR_GSBASE:
3072
        env->segs[R_GS].base = val;
3073
        break;
3074
    case MSR_KERNELGSBASE:
3075
        env->kernelgsbase = val;
3076
        break;
3077
#endif
3078
    case MSR_MTRRphysBase(0):
3079
    case MSR_MTRRphysBase(1):
3080
    case MSR_MTRRphysBase(2):
3081
    case MSR_MTRRphysBase(3):
3082
    case MSR_MTRRphysBase(4):
3083
    case MSR_MTRRphysBase(5):
3084
    case MSR_MTRRphysBase(6):
3085
    case MSR_MTRRphysBase(7):
3086
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3087
        break;
3088
    case MSR_MTRRphysMask(0):
3089
    case MSR_MTRRphysMask(1):
3090
    case MSR_MTRRphysMask(2):
3091
    case MSR_MTRRphysMask(3):
3092
    case MSR_MTRRphysMask(4):
3093
    case MSR_MTRRphysMask(5):
3094
    case MSR_MTRRphysMask(6):
3095
    case MSR_MTRRphysMask(7):
3096
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3097
        break;
3098
    case MSR_MTRRfix64K_00000:
3099
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3100
        break;
3101
    case MSR_MTRRfix16K_80000:
3102
    case MSR_MTRRfix16K_A0000:
3103
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3104
        break;
3105
    case MSR_MTRRfix4K_C0000:
3106
    case MSR_MTRRfix4K_C8000:
3107
    case MSR_MTRRfix4K_D0000:
3108
    case MSR_MTRRfix4K_D8000:
3109
    case MSR_MTRRfix4K_E0000:
3110
    case MSR_MTRRfix4K_E8000:
3111
    case MSR_MTRRfix4K_F0000:
3112
    case MSR_MTRRfix4K_F8000:
3113
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3114
        break;
3115
    case MSR_MTRRdefType:
3116
        env->mtrr_deftype = val;
3117
        break;
3118
    case MSR_MCG_STATUS:
3119
        env->mcg_status = val;
3120
        break;
3121
    case MSR_MCG_CTL:
3122
        if ((env->mcg_cap & MCG_CTL_P)
3123
            && (val == 0 || val == ~(uint64_t)0))
3124
            env->mcg_ctl = val;
3125
        break;
3126
    case MSR_TSC_AUX:
3127
        env->tsc_aux = val;
3128
        break;
3129
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            /* MCi_CTL may only be written with all-zeroes or all-ones;
               the other per-bank registers accept any value */
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

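/* Descriptor type codes accepted by LSL/LAR below: 1/3 = 16-bit TSS
   (available/busy), 2 = LDT, 9/11 = 32-bit TSS (available/busy),
   4 = 16-bit call gate, 5 = task gate, 12 = 32-bit call gate.  LSL
   only accepts descriptors that have a limit, so it rejects the gate
   types that LAR allows. */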
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

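/* Set the given exception bits in the FPU status word; if any of them
   is unmasked in the control word, also set the error summary (ES) and
   busy (B) bits so that a later fwait raises #MF. */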
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

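/* Map the floatx_compare() result (-1 = less, 0 = equal, 1 = greater,
   2 = unordered, hence the "ret + 1" indexing) to the x87 condition
   codes C0 = 0x0100, C2 = 0x0400 and C3 = 0x4000; 0x4500 = C3|C2|C0
   flags the unordered case. */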
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

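/* Propagate the FPU control word into the softfloat status: bits 10-11
   select the rounding mode and, with 80-bit doubles, bits 8-9 select
   the rounding precision (0 = single, 2 = double, 3 = extended). */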
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

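/* The x87 packed BCD format is 10 bytes: bytes 0-8 hold 18 BCD digits,
   two per byte with the least significant byte first, and bit 7 of
   byte 9 is the sign.  E.g. -123 is stored as 23 01 00 ... 00 80. */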
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

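/* FPREM1 computes the IEEE remainder: the quotient is rounded to the
   nearest integer, so the partial remainder lies in [-|ST1|/2, |ST1|/2].
   FPREM below differs only in truncating the quotient towards zero.
   Both report the low three quotient bits in C0, C3 and C1. */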
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    if (floatx_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

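/* FXAM classifies ST0 into the condition codes: 0x0100 = NaN,
   0x0500 = infinity, 0x0400 = normal finite, 0x4000 = zero,
   0x4400 = denormal; C1 (0x0200) additionally holds the sign. */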
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

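/* FXSAVE/FXRSTOR use a 512-byte, 16-byte-aligned area: FCW at 0x00,
   FSW at 0x02, the abridged (one valid bit per register) tag word at
   0x04, MXCSR at 0x18, the FP/MMX registers from 0x20 on (16 bytes
   each) and the XMM registers from 0xa0 on. */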
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

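/* Without native 80-bit long doubles the FP registers are kept as IEEE
   doubles, so these 64 <-> 80 bit conversions rebias the exponent
   (1023 vs 16383) and make the integer bit explicit. */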
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

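/* 128/64 -> 64 bit division for DIV/IDIV: a simple bit-by-bit restoring
   algorithm that shifts the 128-bit dividend left one bit at a time,
   subtracting b whenever possible.  On success *plow holds the quotient
   and *phigh the remainder. */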
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

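/* 64-bit MUL/IMUL set CF/OF when the product does not fit in 64 bits:
   for MUL that means a non-zero high half, for IMUL a high half that is
   not the sign extension of the low half.  CC_SRC is set up so that it
   is non-zero exactly in those cases. */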
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

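/* RSQRTSS/RCPSS helpers.  Real hardware only guarantees a relative
   error of at most 1.5 * 2^-12 for these instructions; here they are
   modelled with a full-precision computation. */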
static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

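/* The VMCB stores segment attributes in a compressed 12-bit form: the
   access byte (flags bits 8-15) goes into bits 0-7 and the G/D/L/AVL
   nibble (flags bits 20-23) into bits 8-11, dropping the limit bits
   that sit between them in the descriptor. */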
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

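/* VMRUN world switch: save the host state into the hsave page, load the
   guest state and the intercept bitmaps from the VMCB addressed by rAX,
   enable intercepts and GIF, and finally inject a pending event from
   EVENTINJ if one is valid. */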
void helper_vmrun(int aflag, int next_eip_addend)
4927
{
4928
    target_ulong addr;
4929
    uint32_t event_inj;
4930
    uint32_t int_ctl;
4931

    
4932
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4933

    
4934
    if (aflag == 2)
4935
        addr = EAX;
4936
    else
4937
        addr = (uint32_t)EAX;
4938

    
4939
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4940

    
4941
    env->vm_vmcb = addr;
4942

    
4943
    /* save the current CPU state in the hsave page */
4944
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4945
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4946

    
4947
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4948
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4949

    
4950
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4951
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4952
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4953
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4954
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4955
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4956

    
4957
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4958
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4959

    
4960
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4961
                  &env->segs[R_ES]);
4962
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4963
                 &env->segs[R_CS]);
4964
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4965
                 &env->segs[R_SS]);
4966
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4967
                 &env->segs[R_DS]);
4968

    
4969
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4970
             EIP + next_eip_addend);
4971
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4972
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4973

    
4974
    /* load the interception bitmaps so we do not need to access the
4975
       vmcb in svm mode */
4976
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4977
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4978
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4979
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4980
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4981
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4982

    
4983
    /* enable intercepts */
4984
    env->hflags |= HF_SVMI_MASK;
4985

    
4986
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4987

    
4988
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4989
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4990

    
4991
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4992
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4993

    
4994
    /* clear exit_info_2 so we behave like the real hardware */
4995
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4996

    
4997
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4998
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4999
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5000
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5001
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5002
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5003
    if (int_ctl & V_INTR_MASKING_MASK) {
5004
        env->v_tpr = int_ctl & V_TPR_MASK;
5005
        env->hflags2 |= HF2_VINTR_MASK;
5006
        if (env->eflags & IF_MASK)
5007
            env->hflags2 |= HF2_HIF_MASK;
5008
    }
5009

    
5010
    cpu_load_efer(env, 
5011
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5012
    env->eflags = 0;
5013
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5014
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5015
    CC_OP = CC_OP_EFLAGS;
5016

    
5017
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5018
                       env, R_ES);
5019
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5020
                       env, R_CS);
5021
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5022
                       env, R_SS);
5023
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5024
                       env, R_DS);
5025

    
5026
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5027
    env->eip = EIP;
5028
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5029
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5030
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5031
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5032
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5033

    
5034
    /* FIXME: guest state consistency checks */
5035

    
5036
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5037
        case TLB_CONTROL_DO_NOTHING:
5038
            break;
5039
        case TLB_CONTROL_FLUSH_ALL_ASID:
5040
            /* FIXME: this is not 100% correct but should work for now */
5041
            tlb_flush(env, 1);
5042
        break;
5043
    }
5044

    
5045
    env->hflags2 |= HF2_GIF_MASK;
5046

    
5047
    if (int_ctl & V_IRQ_MASK) {
5048
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5049
    }
5050

    
5051
    /* maybe we need to inject an event */
5052
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5053
    if (event_inj & SVM_EVTINJ_VALID) {
5054
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5055
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5056
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5057

    
5058
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5059
        /* FIXME: need to implement valid_err */
5060
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5061
        case SVM_EVTINJ_TYPE_INTR:
5062
                env->exception_index = vector;
5063
                env->error_code = event_inj_err;
5064
                env->exception_is_int = 0;
5065
                env->exception_next_eip = -1;
5066
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5067
                /* XXX: is it always correct ? */
5068
                do_interrupt(vector, 0, 0, 0, 1);
5069
                break;
5070
        case SVM_EVTINJ_TYPE_NMI:
5071
                env->exception_index = EXCP02_NMI;
5072
                env->error_code = event_inj_err;
5073
                env->exception_is_int = 0;
5074
                env->exception_next_eip = EIP;
5075
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5076
                cpu_loop_exit();
5077
                break;
5078
        case SVM_EVTINJ_TYPE_EXEPT:
5079
                env->exception_index = vector;
5080
                env->error_code = event_inj_err;
5081
                env->exception_is_int = 0;
5082
                env->exception_next_eip = -1;
5083
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5084
                cpu_loop_exit();
5085
                break;
5086
        case SVM_EVTINJ_TYPE_SOFT:
5087
                env->exception_index = vector;
5088
                env->error_code = event_inj_err;
5089
                env->exception_is_int = 1;
5090
                env->exception_next_eip = EIP;
5091
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5092
                cpu_loop_exit();
5093
                break;
5094
        }
5095
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5096
    }
5097
}
5098

    
5099
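/* VMMCALL is either intercepted (in which case helper_vmexit() does not
   return) or raises #UD in the guest. */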
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

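/* VMLOAD reads FS, GS, TR and LDTR plus the syscall/sysenter MSR state
   from the VMCB whose physical address is in rAX (truncated to 32 bits
   unless the effective address size is 64-bit). */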
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

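/* VMSAVE is the mirror image of VMLOAD: the same register set is
   written back to the VMCB addressed by rAX. */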
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

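/* STGI/CLGI toggle the global interrupt flag, which gates delivery of
   interrupts and other events while SVM is active. */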
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

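/* INVLPGA invalidates the TLB mapping of the virtual address in rAX;
   the ASID in ECX is currently ignored here. */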
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to check whether the flush is needed */
    tlb_flush_page(env, addr);
}

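/* Consult the intercept bitmaps cached in env at VMRUN time and force a
   #VMEXIT when the current operation is intercepted. */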
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
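    /* MSR permission bitmap: two bits per MSR (read bit, then write bit)
       packed in three 2K-byte ranges covering MSRs 0-0x1fff,
       0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  Below, t1 is the
       byte offset into the map, t0 the bit offset, and param (0 = read,
       1 = write) selects which of the two bits to test. */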
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

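/* I/O permission bitmap check: one bit per port.  The one-hot size field
   in param (1/2/4 bytes) expands into a mask covering one bit per byte
   of the access. */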
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

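/* #VMEXIT: write the guest state and exit reason back to the VMCB, then
   reload the host state saved in vm_hsave at VMRUN time. */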
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

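    /* Report any event that was pending injection back to the host via
       exit_int_info, then clear event_inj. */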
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
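/* env->fptags[] holds one byte per x87 register: 0 = valid, 1 = empty.
   MMX instructions reset the stack top and mark every register valid;
   EMMS marks them all empty again. */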
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

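/* ops_sse.h is instantiated twice: SHIFT 0 generates the 64-bit MMX
   variants, SHIFT 1 the 128-bit SSE ones.  helper_template.h is expanded
   once per operand size (SHIFT 0/1/2/3 = 8/16/32/64 bits). */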
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
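/* Note: helper_bsf() relies on the translator to handle a zero operand
   (which only sets ZF) without calling it; with t0 == 0 the loop below
   would not terminate. */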
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

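/* Shared helper for LZCNT and BSR: with wordsize > 0 it returns the
   LZCNT result for that operand width; with wordsize == 0 it returns the
   bit index of the most significant set bit (BSR semantics, t0 != 0). */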
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

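/* Lazy condition codes: CC_OP records which operation last set the
   flags and the compute_{all,c}_* helpers rebuild EFLAGS (or just CF)
   from CC_SRC/CC_DST on demand.  CC_OP_EFLAGS means the flags are
   already materialized in CC_SRC. */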
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}