target-i386/op_helper.c @ 618ba8e6

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
21
#include "exec-all.h"
22
#include "host-utils.h"
23
#include "ioport.h"
24

    
25
//#define DEBUG_PCALL
26

    
27

    
28
#ifdef DEBUG_PCALL
29
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30
#  define LOG_PCALL_STATE(env) \
31
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32
#else
33
#  define LOG_PCALL(...) do { } while (0)
34
#  define LOG_PCALL_STATE(env) do { } while (0)
35
#endif
36

    
37

    
38
#if 0
39
#define raise_exception_err(a, b)\
40
do {\
41
    qemu_log("raise_exception line=%d\n", __LINE__);\
42
    (raise_exception_err)(a, b);\
43
} while (0)
44
#endif
45

    
46
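/* EFLAGS.PF is set when the low byte of a result has an even number
   of bits set.  The table below maps every byte value directly to
   CC_P or 0 so the flag can be looked up rather than recomputed.  A
   sketch of how such a table could be generated (illustrative only,
   not part of the original source):

       for (i = 0; i < 256; i++)
           table[i] = (ctpop8(i) & 1) ? 0 : CC_P;
 */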
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

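/* RCL rotates through CF, so a 16-bit RCL is effectively a 17-bit
   rotation and an 8-bit RCL a 9-bit one.  The rotate count (already
   masked to 5 bits) must therefore be reduced modulo 17 or modulo 9;
   the two tables below precompute that reduction for counts 0..31. */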
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

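/* x87 constants in the order used by the FLDZ, FLD1, FLDPI, FLDLG2,
   FLDLN2, FLDL2E and FLDL2T constant-load instructions. */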
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

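/* The arithmetic flags are computed lazily: CC_OP records which
   operation last set them and helper_cc_compute_all() rebuilds the
   CC_* bits from the saved operands.  Reading EFLAGS therefore has to
   combine the recomputed bits with DF and the remaining bits kept in
   env->eflags, as done above. */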
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

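/* e1/e2 are the low and high 32 bits of an 8-byte segment descriptor.
   Base and limit are scattered across both words:
       limit[15:0]  = e1[15:0]      limit[19:16] = e2[19:16]
       base[15:0]   = e1[31:16]     base[23:16]  = e2[7:0]
       base[31:24]  = e2[31:24]
   with the access byte and the G/B/L/AVL flags in e2[23:8]; the
   helpers below reassemble them. */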
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

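/* A TSS holds one stack pointer per privilege level: SS0:ESP0 at
   offset 4 of a 32-bit TSS (offset 2 of a 16-bit one), followed by
   SS1:ESP1 and SS2:ESP2.  "(dpl * 4 + 2) << shift" above lands on the
   ESPn slot for the target privilege level in either format. */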
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

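/* Hardware task switch, shared by JMP/CALL to a TSS or task gate and
   by IRET with NT set.  The sequence mirrors what the CPU does:
   validate the new TSS, save the current register state into the old
   TSS, update the busy bits and TR, then load CR3, EFLAGS, the
   general registers, the LDT and finally the segment registers from
   the new TSS. */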
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

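/* The 32-bit TSS ends with an I/O permission bitmap: the 16-bit field
   at offset 0x66 gives the bitmap's offset within the TSS and each
   bit covers one port (bit clear = access allowed).  For example, a
   two-byte access to port 0x3f9 tests bits 0x3f9 and 0x3fa, taken
   from the 16-bit word at bitmap_offset + (0x3f9 >> 3). */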
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

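/* Vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF)
   and 17 (#AC) push an error code on the stack; all other exceptions
   and all interrupts push none. */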
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

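/* On x86_64 a 32-bit stack-pointer update must zero-extend into RSP
   rather than preserve its upper half, hence the dedicated 0xffffffff
   case in SET_ESP above; only the 16-bit case keeps the high bits of
   the old value. */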
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

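/* Protected-mode interrupt/exception entry.  Depending on the gate
   size and the privilege transition, the handler's stack receives,
   from top of frame down: [old SS, old ESP] (only on a stack switch,
   plus GS/FS/DS/ES when leaving vm86 mode), EFLAGS, CS, EIP and, for
   some exceptions, an error code -- each slot 2 or 4 bytes wide. */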
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag (a trap gate does not) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

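/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4/12/20 and
   IST1..IST7 from offset 0x24 on, so "8 * level + 4" covers both:
   levels 0..2 select an RSPn while, as used below, level ist + 3
   selects ISTn. */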
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* an interrupt gate clears the IF flag (a trap gate does not) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

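/* SYSCALL: in long mode the return RIP is saved in RCX and RFLAGS in
   R11, CS/SS are derived from MSR_STAR bits 47:32 (SS = CS + 8), the
   flags selected by env->fmask (the SYSCALL flag-mask MSR) are
   cleared and execution continues at MSR_LSTAR (MSR_CSTAR when called
   from compatibility mode).  In legacy mode only ECX is saved and the
   target is the low 32 bits of MSR_STAR. */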
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

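/* SYSRET: the return selectors come from MSR_STAR bits 63:48; a
   64-bit return uses that value + 16 for CS, a 32-bit return uses it
   directly, and SS is always selector + 8.  The target runs at CPL 3
   with RIP taken from RCX (and, in long mode, RFLAGS restored from
   R11). */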
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

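/* In real mode the IDT is the classic interrupt vector table: 4-byte
   entries holding a 16-bit offset followed by a 16-bit segment, with
   FLAGS, CS and IP pushed on the handler's stack. */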
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

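/* While a guest runs under SVM, the event being delivered is recorded
   in the VMCB's EVENTINJ field so that a #VMEXIT taken in the middle
   of delivery can still report it; do_interrupt() below clears the
   valid bit again once delivery has completed. */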
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

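/* Exception classes for the double-fault rules: #DE (0) and vectors
   10-13 are "contributory", #PF (14) forms its own class and
   everything else is benign.  A contributory fault while delivering a
   contributory one, or any contributory fault or #PF while delivering
   a #PF, escalates to #DF; a fault while delivering #DF is a triple
   fault, which resets the machine. */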
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

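/* Entering SMM saves the CPU state into the state-save area in the
   top 512 bytes of SMRAM (SMBASE + 0xfe00..0xffff, addressed below as
   sm_state + 0x7e00 onwards, with a layout that depends on the
   revision) and resumes execution in a real-mode-like environment at
   SMBASE + 0x8000; RSM restores the saved state. */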
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1619
    }
1620
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1621

    
1622
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1623
    if (val & 0x20000) {
1624
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1625
    }
1626
#endif
1627
    CC_OP = CC_OP_EFLAGS;
1628
    env->hflags &= ~HF_SMM_MASK;
1629
    cpu_smm_update(env);
1630

    
1631
    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1632
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1633
}
1634

    
1635
#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */
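/* The helpers below implement DIV and IDIV for 8/16/32-bit operand
   sizes.  x86 raises #DE (EXCP00_DIVZ here) both for a zero divisor
   and for a quotient that does not fit in the destination: e.g. for an
   8-bit DIV with AX = 0x1234 and divisor 2, the quotient 0x091a
   overflows AL, so the instruction faults instead of truncating. */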

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */
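/* AAM/AAD adjust AL and AH around an immediate base (10 for the usual
   encoding), while AAA/AAS and DAA/DAS fix up AL after ASCII resp.
   packed-BCD additions and subtractions, consuming and producing the
   auxiliary-carry (AF) and carry (CF) flags. */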

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
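
/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match ZF is set and ECX:EBX is stored, otherwise the old value is
   loaded back into EDX:EAX.  Note the store happens on both paths,
   presumably so the access always behaves as a write (the instruction
   is a locked read-modify-write on real hardware) and a write fault is
   taken before any register is modified. */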
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
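
/* ENTER with a non-zero nesting level: the translated code has already
   pushed the old frame pointer; this helper copies level - 1 enclosing
   frame pointers from the old frame and then pushes t1, the new frame
   pointer value.  E.g. "enter 16, 2" copies one saved frame pointer;
   the 16 bytes of locals are allocated by the generated code. */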
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
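
/* LLDT: a null selector (bits 15..2 clear) simply invalidates the LDT;
   otherwise the selector must index a present LDT descriptor (system
   type 2) in the GDT.  In long mode system descriptors are 16 bytes
   wide, which is why entry_limit is 15 there instead of 7. */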
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
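
/* LTR follows the same pattern for the task register: the descriptor
   must be an available 286 or 386 TSS (type 1 or 9) in the GDT, and it
   is marked busy once loaded. */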
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
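
/* Far JMP in protected mode: the destination may be a code segment
   (conforming or not, with the usual CPL/RPL/DPL checks), a task gate
   or TSS selector (which triggers a task switch), or a call gate that
   redirects to the code segment stored in the gate. */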
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
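
/* Far CALL in protected mode.  When the target of a call gate is a
   more privileged, non-conforming code segment (dpl < cpl), a new
   stack is fetched from the TSS and param_count words or dwords of
   parameters are copied from the caller's stack ("to inner privilege"
   below); otherwise the call proceeds on the current stack. */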
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
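
/* Common tail for far RET and IRET in protected mode: shift selects
   the operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), is_iret
   additionally pops EFLAGS, and addend releases the extra parameter
   bytes of RET imm16.  A return to an outer privilege level also pops
   SS:ESP and re-validates the data segment registers. */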
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
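
/* SYSENTER/SYSEXIT: fast system calls through flat segments derived
   from MSR_IA32_SYSENTER_CS (on entry CS = cs and SS = cs + 8; on exit
   CS and SS are taken at fixed offsets above it with RPL 3).  Both
   instructions raise #GP(0) if the SYSENTER_CS MSR is zero. */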
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
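
/* Control and debug register access.  The user-mode-emulation variants
   are stubs; the system-emulation versions below go through the SVM
   intercept checks and map CR8 onto the APIC task priority register. */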
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
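
/* WRMSR/RDMSR: the MSR index is taken from ECX and the 64-bit value is
   passed in (or returned through) the EDX:EAX pair; e.g. WRMSR with
   ECX = MSR_EFER writes ((uint64_t)EDX << 32) | EAX to EFER, masked to
   the capabilities the virtual CPU actually advertises. */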
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    default:
        /* 4 MSRs per MCE bank; the bank count is the low byte of mcg_cap */
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
3142

    
3143
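/* RDMSR: read the MSR selected by (uint32_t)ECX and return it split
   across EDX:EAX (high and low 32 bits respectively). */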
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        /* 4 MSRs per MCE bank; mcg_cap[7:0] is the bank count */
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

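/* LSL: on success, set ZF and return the segment limit (expanded by
   get_seg_limit) of the descriptor named by the selector; on any
   privilege or type check failure, clear ZF and return 0. */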
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

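/* LAR: on success, set ZF and return the access-rights bytes of the
   descriptor (type, DPL, present bit and flags, masked to 0x00f0ff00);
   otherwise clear ZF and return 0. */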
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

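/* VERR/VERW: set ZF if the segment named by the selector is readable
   (resp. writable) at the current privilege level, else clear ZF. */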
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

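/* Set the given exception bits in the FPU status word; if any pending
   exception is unmasked in the control word, also raise the error
   summary and busy flags. */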
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

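/* floatx_compare returns -1/0/1/2 (less/equal/greater/unordered);
   fcom_ccval maps that, via ret + 1, onto the C0/C2/C3 condition
   bits of the FPU status word (0x4500 mask). */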
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

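/* FNSTSW: return the status word with the current top-of-stack index
   inserted into bits 11-13. */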
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

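/* Propagate the guest FPU control word into the softfloat status:
   bits 10-11 (RC) select the rounding mode and bits 8-9 (PC) the
   rounding precision (32/64/80 bits). */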
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

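/* FBLD: the operand is an 18-digit packed BCD number, two digits per
   byte, least significant byte first, with the sign in bit 7 of the
   10th byte.  For illustration, the byte sequence 0x25 0x04 0x00 ...
   decodes to the value 425. */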
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

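/* Note: the transcendental helpers below compute on CPU86_LDouble via
   the host libm, so their precision and corner-case behaviour follow
   the host, not real x87 hardware. */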
void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

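/* FPREM1/FPREM: partial remainder.  When the reduction completes
   (expdif < 53), C2 is cleared and the three low quotient bits are
   reported in C0/C3/C1; otherwise C2 is set and only a partial
   reduction is performed, to be resumed by re-executing the insn. */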
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

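/* FSTENV/FNSTENV: build the 2-bit tag word from fptags[] and the
   register contents (00 valid, 01 zero, 10 special, 11 empty) and
   store the FPU environment in 16- or 32-bit format. */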
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

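/* FXSAVE: 512-byte, 16-byte-aligned area; control/status/tag words at
   offset 0, MXCSR at 0x18, the eight FP registers from 0x20 (16 bytes
   apart), and the XMM registers from 0xa0. */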
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

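/* 128-bit helpers for the 64-bit multiply/divide instructions; values
   are kept as (low, high) uint64_t pairs. */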
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

4543
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4544
{
4545
    uint64_t q, r, a1, a0;
4546
    int i, qb, ab;
4547

    
4548
    a0 = *plow;
4549
    a1 = *phigh;
4550
    if (a1 == 0) {
4551
        q = a0 / b;
4552
        r = a0 % b;
4553
        *plow = q;
4554
        *phigh = r;
4555
    } else {
4556
        if (a1 >= b)
4557
            return 1;
4558
        /* XXX: use a better algorithm */
4559
        for(i = 0; i < 64; i++) {
4560
            ab = a1 >> 63;
4561
            a1 = (a1 << 1) | (a0 >> 63);
4562
            if (ab || a1 >= b) {
4563
                a1 -= b;
4564
                qb = 1;
4565
            } else {
4566
                qb = 0;
4567
            }
4568
            a0 = (a0 << 1) | qb;
4569
        }
4570
#if defined(DEBUG_MULDIV)
4571
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4572
               *phigh, *plow, b, a0, a1);
4573
#endif
4574
        *plow = a0;
4575
        *phigh = a1;
4576
    }
4577
    return 0;
4578
}
4579

    
4580
/* return TRUE if overflow */
4581
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4582
{
4583
    int sa, sb;
4584
    sa = ((int64_t)*phigh < 0);
4585
    if (sa)
4586
        neg128(plow, phigh);
4587
    sb = (b < 0);
4588
    if (sb)
4589
        b = -b;
4590
    if (div64(plow, phigh, b) != 0)
4591
        return 1;
4592
    if (sa ^ sb) {
4593
        if (*plow > (1ULL << 63))
4594
            return 1;
4595
        *plow = - *plow;
4596
    } else {
4597
        if (*plow >= (1ULL << 63))
4598
            return 1;
4599
    }
4600
    if (sa)
4601
        *phigh = - *phigh;
4602
    return 0;
4603
}
4604

    
4605
void helper_mulq_EAX_T0(target_ulong t0)
4606
{
4607
    uint64_t r0, r1;
4608

    
4609
    mulu64(&r0, &r1, EAX, t0);
4610
    EAX = r0;
4611
    EDX = r1;
4612
    CC_DST = r0;
4613
    CC_SRC = r1;
4614
}
4615

    
4616
void helper_imulq_EAX_T0(target_ulong t0)
4617
{
4618
    uint64_t r0, r1;
4619

    
4620
    muls64(&r0, &r1, EAX, t0);
4621
    EAX = r0;
4622
    EDX = r1;
4623
    CC_DST = r0;
4624
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4625
}
4626

    
4627
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4628
{
4629
    uint64_t r0, r1;
4630

    
4631
    muls64(&r0, &r1, t0, t1);
4632
    CC_DST = r0;
4633
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4634
    return r0;
4635
}
4636

    
4637
void helper_divq_EAX(target_ulong t0)
4638
{
4639
    uint64_t r0, r1;
4640
    if (t0 == 0) {
4641
        raise_exception(EXCP00_DIVZ);
4642
    }
4643
    r0 = EAX;
4644
    r1 = EDX;
4645
    if (div64(&r0, &r1, t0))
4646
        raise_exception(EXCP00_DIVZ);
4647
    EAX = r0;
4648
    EDX = r1;
4649
}
4650

    
4651
void helper_idivq_EAX(target_ulong t0)
4652
{
4653
    uint64_t r0, r1;
4654
    if (t0 == 0) {
4655
        raise_exception(EXCP00_DIVZ);
4656
    }
4657
    r0 = EAX;
4658
    r1 = EDX;
4659
    if (idiv64(&r0, &r1, t0))
4660
        raise_exception(EXCP00_DIVZ);
4661
    EAX = r0;
4662
    EDX = r1;
4663
}
4664
#endif
4665

    
4666
static void do_hlt(void)
4667
{
4668
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4669
    env->halted = 1;
4670
    env->exception_index = EXCP_HLT;
4671
    cpu_loop_exit();
4672
}
4673

    
4674
void helper_hlt(int next_eip_addend)
4675
{
4676
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4677
    EIP += next_eip_addend;
4678
    
4679
    do_hlt();
4680
}
4681

    
4682
void helper_monitor(target_ulong ptr)
4683
{
4684
    if ((uint32_t)ECX != 0)
4685
        raise_exception(EXCP0D_GPF);
4686
    /* XXX: store address ? */
4687
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4688
}
4689

    
4690
void helper_mwait(int next_eip_addend)
4691
{
4692
    if ((uint32_t)ECX != 0)
4693
        raise_exception(EXCP0D_GPF);
4694
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4695
    EIP += next_eip_addend;
4696

    
4697
    /* XXX: not complete but not completely erroneous */
4698
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
4699
        /* more than one CPU: do not sleep because another CPU may
4700
           wake this one */
4701
    } else {
4702
        do_hlt();
4703
    }
4704
}
4705

    
4706
void helper_debug(void)
4707
{
4708
    env->exception_index = EXCP_DEBUG;
4709
    cpu_loop_exit();
4710
}
4711

    
4712
void helper_reset_rf(void)
4713
{
4714
    env->eflags &= ~RF_MASK;
4715
}
4716

    
4717
void helper_raise_interrupt(int intno, int next_eip_addend)
4718
{
4719
    raise_interrupt(intno, 1, 0, next_eip_addend);
4720
}
4721

    
4722
void helper_raise_exception(int exception_index)
4723
{
4724
    raise_exception(exception_index);
4725
}
4726

    
4727
void helper_cli(void)
4728
{
4729
    env->eflags &= ~IF_MASK;
4730
}
4731

    
4732
void helper_sti(void)
4733
{
4734
    env->eflags |= IF_MASK;
4735
}
4736

    
4737
#if 0
4738
/* vm86plus instructions */
4739
void helper_cli_vm(void)
4740
{
4741
    env->eflags &= ~VIF_MASK;
4742
}
4743

4744
void helper_sti_vm(void)
4745
{
4746
    env->eflags |= VIF_MASK;
4747
    if (env->eflags & VIP_MASK) {
4748
        raise_exception(EXCP0D_GPF);
4749
    }
4750
}
4751
#endif
4752

    
4753
void helper_set_inhibit_irq(void)
4754
{
4755
    env->hflags |= HF_INHIBIT_IRQ_MASK;
4756
}
4757

    
4758
void helper_reset_inhibit_irq(void)
4759
{
4760
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4761
}
4762

    
4763
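/* BOUND: raise #BR if the signed index is outside the [low, high]
   pair stored at the memory operand. */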
void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

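/* The VMCB stores segment attributes in a packed 12-bit format; the
   helpers below convert between it and the CPU's segment flags. */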
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

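/* VMRUN: save the host state to the hsave page, load the guest state
   from the VMCB at rAX, enable intercepts, and optionally inject the
   event described by control.event_inj. */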
void helper_vmrun(int aflag, int next_eip_addend)
4922
{
4923
    target_ulong addr;
4924
    uint32_t event_inj;
4925
    uint32_t int_ctl;
4926

    
4927
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4928

    
4929
    if (aflag == 2)
4930
        addr = EAX;
4931
    else
4932
        addr = (uint32_t)EAX;
4933

    
4934
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4935

    
4936
    env->vm_vmcb = addr;
4937

    
4938
    /* save the current CPU state in the hsave page */
4939
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4940
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4941

    
4942
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4943
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4944

    
4945
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4946
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4947
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4948
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4949
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4950
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4951

    
4952
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4953
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4954

    
4955
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4956
                  &env->segs[R_ES]);
4957
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4958
                 &env->segs[R_CS]);
4959
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4960
                 &env->segs[R_SS]);
4961
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4962
                 &env->segs[R_DS]);
4963

    
4964
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4965
             EIP + next_eip_addend);
4966
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4967
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4968

    
4969
    /* load the interception bitmaps so we do not need to access the
4970
       vmcb in svm mode */
4971
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4972
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4973
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4974
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4975
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4976
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4977

    
4978
    /* enable intercepts */
4979
    env->hflags |= HF_SVMI_MASK;
4980

    
4981
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4982

    
4983
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4984
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4985

    
4986
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4987
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4988

    
4989
    /* clear exit_info_2 so we behave like the real hardware */
4990
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4991

    
4992
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4993
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4994
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4995
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4996
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4997
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4998
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

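/* VMMCALL is only useful when intercepted by the hypervisor; if the
   intercept does not fire, the instruction raises #UD. */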
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

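/* VMLOAD: load the additional guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSRs) from the VMCB whose physical address is in rAX. */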
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

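/* VMSAVE: the mirror image of VMLOAD; store the same state back into
   the VMCB addressed by rAX. */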
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

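/* STGI/CLGI set and clear the global interrupt flag (GIF), which gates
   the delivery of interrupts and other events. */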
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

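/* INVLPGA: invalidate the TLB mapping for the virtual page addressed by
   rAX (per the SVM spec the ASID operand is in ECX; it is currently
   ignored here, see the XXX below). */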
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to decide whether the flush is needed */
    tlb_flush_page(env, addr);
}

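/* Check whether an intercept for 'type' is active and, if so, leave the
   guest via helper_vmexit().  For MSR accesses the MSR permission map is
   consulted: it holds two bits per MSR, in three regions covering MSRs
   0000_0000h-0000_1fffh, c000_0000h-c000_1fffh and c001_0000h-c001_1fffh. */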
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

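/* IN/OUT intercepts: test the port's bits in the I/O permission map and,
   when set, record the address of the next instruction in exit_info_2
   before taking the #VMEXIT. */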
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

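/* #VMEXIT: write the guest state and the exit reason back into the VMCB,
   reload the host state saved by vmrun from vm_hsave, and restart the
   cpu loop in host context. */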
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
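/* Entering MMX mode resets the FPU top-of-stack pointer and marks all
   eight tag-word entries as valid (0), so MM0..MM7 alias the ST registers. */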
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

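/* EMMS marks every tag entry as empty (1), handing the register file
   back to x87 code. */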
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

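/* ops_sse.h is included twice: with SHIFT 0 it instantiates the 64-bit
   MMX forms of the vector helpers, with SHIFT 1 the 128-bit SSE forms.
   helper_template.h is likewise instantiated once per operand size
   (byte/word/long, plus quad on x86_64). */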
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
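/* Note: helper_bsf() never terminates for t0 == 0; BSF leaves its
   destination undefined in that case, so callers must pass a non-zero
   operand. */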
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

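/* Shared implementation for LZCNT and BSR: with wordsize > 0 it returns
   the number of leading zero bits in a wordsize-bit operand (wordsize
   itself for a zero operand); with wordsize == 0 it returns the bit
   index of the most significant set bit, which is what BSR needs. */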
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

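/* Lazy condition-code evaluation: CC_OP records which operation last set
   the flags, and the helpers below reconstruct EFLAGS (or just CF) on
   demand from the operands saved in CC_SRC/CC_DST. */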
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

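/* Same dispatch as helper_cc_compute_all(), but only CF is computed.
   Several widths share one helper where CF does not depend on the
   operand size (e.g. INC/DEC, which preserve the CF saved in CC_SRC). */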
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}