Statistics
| Branch: | Revision:

root / target-i386 / op_helper.c @ eef26553

History | View | Annotate | Download (156.6 kB)

1
/*
2
 *  i386 helpers
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#define CPU_NO_GLOBAL_REGS
21
#include "exec.h"
22
#include "exec-all.h"
23
#include "host-utils.h"
24

    
25
//#define DEBUG_PCALL
26

    
27

    
28
#ifdef DEBUG_PCALL
29
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30
#  define LOG_PCALL_STATE(env) \
31
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32
#else
33
#  define LOG_PCALL(...) do { } while (0)
34
#  define LOG_PCALL_STATE(env) do { } while (0)
35
#endif
36

    
37

    
38
#if 0
39
#define raise_exception_err(a, b)\
40
do {\
41
    qemu_log("raise_exception line=%d\n", __LINE__);\
42
    (raise_exception_err)(a, b);\
43
} while (0)
44
#endif
45

    
46
static const uint8_t parity_table[256] = {
47
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79
};
80

    
81
/* modulo 17 table */
82
static const uint8_t rclw_table[32] = {
83
    0, 1, 2, 3, 4, 5, 6, 7,
84
    8, 9,10,11,12,13,14,15,
85
   16, 0, 1, 2, 3, 4, 5, 6,
86
    7, 8, 9,10,11,12,13,14,
87
};
88

    
89
/* modulo 9 table */
90
static const uint8_t rclb_table[32] = {
91
    0, 1, 2, 3, 4, 5, 6, 7,
92
    8, 0, 1, 2, 3, 4, 5, 6,
93
    7, 8, 0, 1, 2, 3, 4, 5,
94
    6, 7, 8, 0, 1, 2, 3, 4,
95
};
96

    
97
static const CPU86_LDouble f15rk[7] =
98
{
99
    0.00000000000000000000L,
100
    1.00000000000000000000L,
101
    3.14159265358979323851L,  /*pi*/
102
    0.30102999566398119523L,  /*lg2*/
103
    0.69314718055994530943L,  /*ln2*/
104
    1.44269504088896340739L,  /*l2e*/
105
    3.32192809488736234781L,  /*l2t*/
106
};
107

    
108
/* broken thread support */
109

    
110
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
111

    
112
void helper_lock(void)
113
{
114
    spin_lock(&global_cpu_lock);
115
}
116

    
117
void helper_unlock(void)
118
{
119
    spin_unlock(&global_cpu_lock);
120
}
121

    
122
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
123
{
124
    load_eflags(t0, update_mask);
125
}
126

    
127
target_ulong helper_read_eflags(void)
128
{
129
    uint32_t eflags;
130
    eflags = helper_cc_compute_all(CC_OP);
131
    eflags |= (DF & DF_MASK);
132
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
133
    return eflags;
134
}
135

    
136
/* return non zero if error */
137
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
138
                               int selector)
139
{
140
    SegmentCache *dt;
141
    int index;
142
    target_ulong ptr;
143

    
144
    if (selector & 0x4)
145
        dt = &env->ldt;
146
    else
147
        dt = &env->gdt;
148
    index = selector & ~7;
149
    if ((index + 7) > dt->limit)
150
        return -1;
151
    ptr = dt->base + index;
152
    *e1_ptr = ldl_kernel(ptr);
153
    *e2_ptr = ldl_kernel(ptr + 4);
154
    return 0;
155
}
156

    
157
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
158
{
159
    unsigned int limit;
160
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161
    if (e2 & DESC_G_MASK)
162
        limit = (limit << 12) | 0xfff;
163
    return limit;
164
}
165

    
166
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
167
{
168
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
169
}
170

    
171
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
172
{
173
    sc->base = get_seg_base(e1, e2);
174
    sc->limit = get_seg_limit(e1, e2);
175
    sc->flags = e2;
176
}
177

    
178
/* init the segment cache in vm86 mode. */
179
static inline void load_seg_vm(int seg, int selector)
180
{
181
    selector &= 0xffff;
182
    cpu_x86_load_seg_cache(env, seg, selector,
183
                           (selector << 4), 0xffff, 0);
184
}
185

    
186
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
187
                                       uint32_t *esp_ptr, int dpl)
188
{
189
    int type, index, shift;
190

    
191
#if 0
192
    {
193
        int i;
194
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
195
        for(i=0;i<env->tr.limit;i++) {
196
            printf("%02x ", env->tr.base[i]);
197
            if ((i & 7) == 7) printf("\n");
198
        }
199
        printf("\n");
200
    }
201
#endif
202

    
203
    if (!(env->tr.flags & DESC_P_MASK))
204
        cpu_abort(env, "invalid tss");
205
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
206
    if ((type & 7) != 1)
207
        cpu_abort(env, "invalid tss type");
208
    shift = type >> 3;
209
    index = (dpl * 4 + 2) << shift;
210
    if (index + (4 << shift) - 1 > env->tr.limit)
211
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
212
    if (shift == 0) {
213
        *esp_ptr = lduw_kernel(env->tr.base + index);
214
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
215
    } else {
216
        *esp_ptr = ldl_kernel(env->tr.base + index);
217
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
218
    }
219
}
220

    
221
/* XXX: merge with load_seg() */
222
static void tss_load_seg(int seg_reg, int selector)
223
{
224
    uint32_t e1, e2;
225
    int rpl, dpl, cpl;
226

    
227
    if ((selector & 0xfffc) != 0) {
228
        if (load_segment(&e1, &e2, selector) != 0)
229
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230
        if (!(e2 & DESC_S_MASK))
231
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232
        rpl = selector & 3;
233
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
234
        cpl = env->hflags & HF_CPL_MASK;
235
        if (seg_reg == R_CS) {
236
            if (!(e2 & DESC_CS_MASK))
237
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238
            /* XXX: is it correct ? */
239
            if (dpl != rpl)
240
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241
            if ((e2 & DESC_C_MASK) && dpl > rpl)
242
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243
        } else if (seg_reg == R_SS) {
244
            /* SS must be writable data */
245
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
246
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247
            if (dpl != cpl || dpl != rpl)
248
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
249
        } else {
250
            /* not readable code */
251
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
252
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253
            /* if data or non conforming code, checks the rights */
254
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
255
                if (dpl < cpl || dpl < rpl)
256
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
257
            }
258
        }
259
        if (!(e2 & DESC_P_MASK))
260
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
261
        cpu_x86_load_seg_cache(env, seg_reg, selector,
262
                       get_seg_base(e1, e2),
263
                       get_seg_limit(e1, e2),
264
                       e2);
265
    } else {
266
        if (seg_reg == R_SS || seg_reg == R_CS)
267
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
268
    }
269
}
270

    
271
#define SWITCH_TSS_JMP  0
272
#define SWITCH_TSS_IRET 1
273
#define SWITCH_TSS_CALL 2
274

    
275
/* XXX: restore CPU state in registers (PowerPC case) */
276
static void switch_tss(int tss_selector,
277
                       uint32_t e1, uint32_t e2, int source,
278
                       uint32_t next_eip)
279
{
280
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281
    target_ulong tss_base;
282
    uint32_t new_regs[8], new_segs[6];
283
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284
    uint32_t old_eflags, eflags_mask;
285
    SegmentCache *dt;
286
    int index;
287
    target_ulong ptr;
288

    
289
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
291

    
292
    /* if task gate, we read the TSS segment and we load it */
293
    if (type == 5) {
294
        if (!(e2 & DESC_P_MASK))
295
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296
        tss_selector = e1 >> 16;
297
        if (tss_selector & 4)
298
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299
        if (load_segment(&e1, &e2, tss_selector) != 0)
300
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301
        if (e2 & DESC_S_MASK)
302
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
304
        if ((type & 7) != 1)
305
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306
    }
307

    
308
    if (!(e2 & DESC_P_MASK))
309
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
310

    
311
    if (type & 8)
312
        tss_limit_max = 103;
313
    else
314
        tss_limit_max = 43;
315
    tss_limit = get_seg_limit(e1, e2);
316
    tss_base = get_seg_base(e1, e2);
317
    if ((tss_selector & 4) != 0 ||
318
        tss_limit < tss_limit_max)
319
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
321
    if (old_type & 8)
322
        old_tss_limit_max = 103;
323
    else
324
        old_tss_limit_max = 43;
325

    
326
    /* read all the registers from the new TSS */
327
    if (type & 8) {
328
        /* 32 bit */
329
        new_cr3 = ldl_kernel(tss_base + 0x1c);
330
        new_eip = ldl_kernel(tss_base + 0x20);
331
        new_eflags = ldl_kernel(tss_base + 0x24);
332
        for(i = 0; i < 8; i++)
333
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334
        for(i = 0; i < 6; i++)
335
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336
        new_ldt = lduw_kernel(tss_base + 0x60);
337
        new_trap = ldl_kernel(tss_base + 0x64);
338
    } else {
339
        /* 16 bit */
340
        new_cr3 = 0;
341
        new_eip = lduw_kernel(tss_base + 0x0e);
342
        new_eflags = lduw_kernel(tss_base + 0x10);
343
        for(i = 0; i < 8; i++)
344
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345
        for(i = 0; i < 4; i++)
346
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347
        new_ldt = lduw_kernel(tss_base + 0x2a);
348
        new_segs[R_FS] = 0;
349
        new_segs[R_GS] = 0;
350
        new_trap = 0;
351
    }
352

    
353
    /* NOTE: we must avoid memory exceptions during the task switch,
354
       so we make dummy accesses before */
355
    /* XXX: it can still fail in some cases, so a bigger hack is
356
       necessary to valid the TLB after having done the accesses */
357

    
358
    v1 = ldub_kernel(env->tr.base);
359
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
360
    stb_kernel(env->tr.base, v1);
361
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
362

    
363
    /* clear busy bit (it is restartable) */
364
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
365
        target_ulong ptr;
366
        uint32_t e2;
367
        ptr = env->gdt.base + (env->tr.selector & ~7);
368
        e2 = ldl_kernel(ptr + 4);
369
        e2 &= ~DESC_TSS_BUSY_MASK;
370
        stl_kernel(ptr + 4, e2);
371
    }
372
    old_eflags = compute_eflags();
373
    if (source == SWITCH_TSS_IRET)
374
        old_eflags &= ~NT_MASK;
375

    
376
    /* save the current state in the old TSS */
377
    if (type & 8) {
378
        /* 32 bit */
379
        stl_kernel(env->tr.base + 0x20, next_eip);
380
        stl_kernel(env->tr.base + 0x24, old_eflags);
381
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
382
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
383
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
384
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
385
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
386
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
387
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
388
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
389
        for(i = 0; i < 6; i++)
390
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
391
    } else {
392
        /* 16 bit */
393
        stw_kernel(env->tr.base + 0x0e, next_eip);
394
        stw_kernel(env->tr.base + 0x10, old_eflags);
395
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
396
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
397
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
398
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
399
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
400
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
401
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
402
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
403
        for(i = 0; i < 4; i++)
404
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
405
    }
406

    
407
    /* now if an exception occurs, it will occurs in the next task
408
       context */
409

    
410
    if (source == SWITCH_TSS_CALL) {
411
        stw_kernel(tss_base, env->tr.selector);
412
        new_eflags |= NT_MASK;
413
    }
414

    
415
    /* set busy bit */
416
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
417
        target_ulong ptr;
418
        uint32_t e2;
419
        ptr = env->gdt.base + (tss_selector & ~7);
420
        e2 = ldl_kernel(ptr + 4);
421
        e2 |= DESC_TSS_BUSY_MASK;
422
        stl_kernel(ptr + 4, e2);
423
    }
424

    
425
    /* set the new CPU state */
426
    /* from this point, any exception which occurs can give problems */
427
    env->cr[0] |= CR0_TS_MASK;
428
    env->hflags |= HF_TS_MASK;
429
    env->tr.selector = tss_selector;
430
    env->tr.base = tss_base;
431
    env->tr.limit = tss_limit;
432
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
433

    
434
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
435
        cpu_x86_update_cr3(env, new_cr3);
436
    }
437

    
438
    /* load all registers without an exception, then reload them with
439
       possible exception */
440
    env->eip = new_eip;
441
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
442
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
443
    if (!(type & 8))
444
        eflags_mask &= 0xffff;
445
    load_eflags(new_eflags, eflags_mask);
446
    /* XXX: what to do in 16 bit case ? */
447
    EAX = new_regs[0];
448
    ECX = new_regs[1];
449
    EDX = new_regs[2];
450
    EBX = new_regs[3];
451
    ESP = new_regs[4];
452
    EBP = new_regs[5];
453
    ESI = new_regs[6];
454
    EDI = new_regs[7];
455
    if (new_eflags & VM_MASK) {
456
        for(i = 0; i < 6; i++)
457
            load_seg_vm(i, new_segs[i]);
458
        /* in vm86, CPL is always 3 */
459
        cpu_x86_set_cpl(env, 3);
460
    } else {
461
        /* CPL is set the RPL of CS */
462
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
463
        /* first just selectors as the rest may trigger exceptions */
464
        for(i = 0; i < 6; i++)
465
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
466
    }
467

    
468
    env->ldt.selector = new_ldt & ~4;
469
    env->ldt.base = 0;
470
    env->ldt.limit = 0;
471
    env->ldt.flags = 0;
472

    
473
    /* load the LDT */
474
    if (new_ldt & 4)
475
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
476

    
477
    if ((new_ldt & 0xfffc) != 0) {
478
        dt = &env->gdt;
479
        index = new_ldt & ~7;
480
        if ((index + 7) > dt->limit)
481
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
482
        ptr = dt->base + index;
483
        e1 = ldl_kernel(ptr);
484
        e2 = ldl_kernel(ptr + 4);
485
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
486
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487
        if (!(e2 & DESC_P_MASK))
488
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
489
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
490
    }
491

    
492
    /* load the segments */
493
    if (!(new_eflags & VM_MASK)) {
494
        tss_load_seg(R_CS, new_segs[R_CS]);
495
        tss_load_seg(R_SS, new_segs[R_SS]);
496
        tss_load_seg(R_ES, new_segs[R_ES]);
497
        tss_load_seg(R_DS, new_segs[R_DS]);
498
        tss_load_seg(R_FS, new_segs[R_FS]);
499
        tss_load_seg(R_GS, new_segs[R_GS]);
500
    }
501

    
502
    /* check that EIP is in the CS segment limits */
503
    if (new_eip > env->segs[R_CS].limit) {
504
        /* XXX: different exception if CALL ? */
505
        raise_exception_err(EXCP0D_GPF, 0);
506
    }
507

    
508
#ifndef CONFIG_USER_ONLY
509
    /* reset local breakpoints */
510
    if (env->dr[7] & 0x55) {
511
        for (i = 0; i < 4; i++) {
512
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
513
                hw_breakpoint_remove(env, i);
514
        }
515
        env->dr[7] &= ~0x55;
516
    }
517
#endif
518
}
519

    
520
/* check if Port I/O is allowed in TSS */
521
static inline void check_io(int addr, int size)
522
{
523
    int io_offset, val, mask;
524

    
525
    /* TSS must be a valid 32 bit one */
526
    if (!(env->tr.flags & DESC_P_MASK) ||
527
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
528
        env->tr.limit < 103)
529
        goto fail;
530
    io_offset = lduw_kernel(env->tr.base + 0x66);
531
    io_offset += (addr >> 3);
532
    /* Note: the check needs two bytes */
533
    if ((io_offset + 1) > env->tr.limit)
534
        goto fail;
535
    val = lduw_kernel(env->tr.base + io_offset);
536
    val >>= (addr & 7);
537
    mask = (1 << size) - 1;
538
    /* all bits must be zero to allow the I/O */
539
    if ((val & mask) != 0) {
540
    fail:
541
        raise_exception_err(EXCP0D_GPF, 0);
542
    }
543
}
544

    
545
void helper_check_iob(uint32_t t0)
546
{
547
    check_io(t0, 1);
548
}
549

    
550
void helper_check_iow(uint32_t t0)
551
{
552
    check_io(t0, 2);
553
}
554

    
555
void helper_check_iol(uint32_t t0)
556
{
557
    check_io(t0, 4);
558
}
559

    
560
void helper_outb(uint32_t port, uint32_t data)
561
{
562
    cpu_outb(env, port, data & 0xff);
563
}
564

    
565
target_ulong helper_inb(uint32_t port)
566
{
567
    return cpu_inb(env, port);
568
}
569

    
570
void helper_outw(uint32_t port, uint32_t data)
571
{
572
    cpu_outw(env, port, data & 0xffff);
573
}
574

    
575
target_ulong helper_inw(uint32_t port)
576
{
577
    return cpu_inw(env, port);
578
}
579

    
580
void helper_outl(uint32_t port, uint32_t data)
581
{
582
    cpu_outl(env, port, data);
583
}
584

    
585
target_ulong helper_inl(uint32_t port)
586
{
587
    return cpu_inl(env, port);
588
}
589

    
590
static inline unsigned int get_sp_mask(unsigned int e2)
591
{
592
    if (e2 & DESC_B_MASK)
593
        return 0xffffffff;
594
    else
595
        return 0xffff;
596
}
597

    
598
#ifdef TARGET_X86_64
599
#define SET_ESP(val, sp_mask)\
600
do {\
601
    if ((sp_mask) == 0xffff)\
602
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
603
    else if ((sp_mask) == 0xffffffffLL)\
604
        ESP = (uint32_t)(val);\
605
    else\
606
        ESP = (val);\
607
} while (0)
608
#else
609
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
610
#endif
611

    
612
/* in 64-bit machines, this can overflow. So this segment addition macro
613
 * can be used to trim the value to 32-bit whenever needed */
614
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
615

    
616
/* XXX: add a is_user flag to have proper security support */
617
#define PUSHW(ssp, sp, sp_mask, val)\
618
{\
619
    sp -= 2;\
620
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
621
}
622

    
623
#define PUSHL(ssp, sp, sp_mask, val)\
624
{\
625
    sp -= 4;\
626
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
627
}
628

    
629
#define POPW(ssp, sp, sp_mask, val)\
630
{\
631
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
632
    sp += 2;\
633
}
634

    
635
#define POPL(ssp, sp, sp_mask, val)\
636
{\
637
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
638
    sp += 4;\
639
}
640

    
641
/* protected mode interrupt */
642
static void do_interrupt_protected(int intno, int is_int, int error_code,
643
                                   unsigned int next_eip, int is_hw)
644
{
645
    SegmentCache *dt;
646
    target_ulong ptr, ssp;
647
    int type, dpl, selector, ss_dpl, cpl;
648
    int has_error_code, new_stack, shift;
649
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
650
    uint32_t old_eip, sp_mask;
651

    
652
    has_error_code = 0;
653
    if (!is_int && !is_hw) {
654
        switch(intno) {
655
        case 8:
656
        case 10:
657
        case 11:
658
        case 12:
659
        case 13:
660
        case 14:
661
        case 17:
662
            has_error_code = 1;
663
            break;
664
        }
665
    }
666
    if (is_int)
667
        old_eip = next_eip;
668
    else
669
        old_eip = env->eip;
670

    
671
    dt = &env->idt;
672
    if (intno * 8 + 7 > dt->limit)
673
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
674
    ptr = dt->base + intno * 8;
675
    e1 = ldl_kernel(ptr);
676
    e2 = ldl_kernel(ptr + 4);
677
    /* check gate type */
678
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
679
    switch(type) {
680
    case 5: /* task gate */
681
        /* must do that check here to return the correct error code */
682
        if (!(e2 & DESC_P_MASK))
683
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
684
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
685
        if (has_error_code) {
686
            int type;
687
            uint32_t mask;
688
            /* push the error code */
689
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
690
            shift = type >> 3;
691
            if (env->segs[R_SS].flags & DESC_B_MASK)
692
                mask = 0xffffffff;
693
            else
694
                mask = 0xffff;
695
            esp = (ESP - (2 << shift)) & mask;
696
            ssp = env->segs[R_SS].base + esp;
697
            if (shift)
698
                stl_kernel(ssp, error_code);
699
            else
700
                stw_kernel(ssp, error_code);
701
            SET_ESP(esp, mask);
702
        }
703
        return;
704
    case 6: /* 286 interrupt gate */
705
    case 7: /* 286 trap gate */
706
    case 14: /* 386 interrupt gate */
707
    case 15: /* 386 trap gate */
708
        break;
709
    default:
710
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
711
        break;
712
    }
713
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
714
    cpl = env->hflags & HF_CPL_MASK;
715
    /* check privilege if software int */
716
    if (is_int && dpl < cpl)
717
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
718
    /* check valid bit */
719
    if (!(e2 & DESC_P_MASK))
720
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
721
    selector = e1 >> 16;
722
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
723
    if ((selector & 0xfffc) == 0)
724
        raise_exception_err(EXCP0D_GPF, 0);
725

    
726
    if (load_segment(&e1, &e2, selector) != 0)
727
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
728
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
729
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
730
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
731
    if (dpl > cpl)
732
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733
    if (!(e2 & DESC_P_MASK))
734
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
735
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
736
        /* to inner privilege */
737
        get_ss_esp_from_tss(&ss, &esp, dpl);
738
        if ((ss & 0xfffc) == 0)
739
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
740
        if ((ss & 3) != dpl)
741
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
742
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
743
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
745
        if (ss_dpl != dpl)
746
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
747
        if (!(ss_e2 & DESC_S_MASK) ||
748
            (ss_e2 & DESC_CS_MASK) ||
749
            !(ss_e2 & DESC_W_MASK))
750
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751
        if (!(ss_e2 & DESC_P_MASK))
752
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
753
        new_stack = 1;
754
        sp_mask = get_sp_mask(ss_e2);
755
        ssp = get_seg_base(ss_e1, ss_e2);
756
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
757
        /* to same privilege */
758
        if (env->eflags & VM_MASK)
759
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
760
        new_stack = 0;
761
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
762
        ssp = env->segs[R_SS].base;
763
        esp = ESP;
764
        dpl = cpl;
765
    } else {
766
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
767
        new_stack = 0; /* avoid warning */
768
        sp_mask = 0; /* avoid warning */
769
        ssp = 0; /* avoid warning */
770
        esp = 0; /* avoid warning */
771
    }
772

    
773
    shift = type >> 3;
774

    
775
#if 0
776
    /* XXX: check that enough room is available */
777
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
778
    if (env->eflags & VM_MASK)
779
        push_size += 8;
780
    push_size <<= shift;
781
#endif
782
    if (shift == 1) {
783
        if (new_stack) {
784
            if (env->eflags & VM_MASK) {
785
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
786
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
787
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
788
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
789
            }
790
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
791
            PUSHL(ssp, esp, sp_mask, ESP);
792
        }
793
        PUSHL(ssp, esp, sp_mask, compute_eflags());
794
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
795
        PUSHL(ssp, esp, sp_mask, old_eip);
796
        if (has_error_code) {
797
            PUSHL(ssp, esp, sp_mask, error_code);
798
        }
799
    } else {
800
        if (new_stack) {
801
            if (env->eflags & VM_MASK) {
802
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
803
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
804
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
805
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
806
            }
807
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
808
            PUSHW(ssp, esp, sp_mask, ESP);
809
        }
810
        PUSHW(ssp, esp, sp_mask, compute_eflags());
811
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
812
        PUSHW(ssp, esp, sp_mask, old_eip);
813
        if (has_error_code) {
814
            PUSHW(ssp, esp, sp_mask, error_code);
815
        }
816
    }
817

    
818
    if (new_stack) {
819
        if (env->eflags & VM_MASK) {
820
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
821
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
822
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
823
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
824
        }
825
        ss = (ss & ~3) | dpl;
826
        cpu_x86_load_seg_cache(env, R_SS, ss,
827
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
828
    }
829
    SET_ESP(esp, sp_mask);
830

    
831
    selector = (selector & ~3) | dpl;
832
    cpu_x86_load_seg_cache(env, R_CS, selector,
833
                   get_seg_base(e1, e2),
834
                   get_seg_limit(e1, e2),
835
                   e2);
836
    cpu_x86_set_cpl(env, dpl);
837
    env->eip = offset;
838

    
839
    /* interrupt gate clear IF mask */
840
    if ((type & 1) == 0) {
841
        env->eflags &= ~IF_MASK;
842
    }
843
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
844
}
845

    
846
#ifdef TARGET_X86_64
847

    
848
#define PUSHQ(sp, val)\
849
{\
850
    sp -= 8;\
851
    stq_kernel(sp, (val));\
852
}
853

    
854
#define POPQ(sp, val)\
855
{\
856
    val = ldq_kernel(sp);\
857
    sp += 8;\
858
}
859

    
860
static inline target_ulong get_rsp_from_tss(int level)
861
{
862
    int index;
863

    
864
#if 0
865
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
866
           env->tr.base, env->tr.limit);
867
#endif
868

    
869
    if (!(env->tr.flags & DESC_P_MASK))
870
        cpu_abort(env, "invalid tss");
871
    index = 8 * level + 4;
872
    if ((index + 7) > env->tr.limit)
873
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
874
    return ldq_kernel(env->tr.base + index);
875
}
876

    
877
/* 64 bit interrupt */
878
static void do_interrupt64(int intno, int is_int, int error_code,
879
                           target_ulong next_eip, int is_hw)
880
{
881
    SegmentCache *dt;
882
    target_ulong ptr;
883
    int type, dpl, selector, cpl, ist;
884
    int has_error_code, new_stack;
885
    uint32_t e1, e2, e3, ss;
886
    target_ulong old_eip, esp, offset;
887

    
888
    has_error_code = 0;
889
    if (!is_int && !is_hw) {
890
        switch(intno) {
891
        case 8:
892
        case 10:
893
        case 11:
894
        case 12:
895
        case 13:
896
        case 14:
897
        case 17:
898
            has_error_code = 1;
899
            break;
900
        }
901
    }
902
    if (is_int)
903
        old_eip = next_eip;
904
    else
905
        old_eip = env->eip;
906

    
907
    dt = &env->idt;
908
    if (intno * 16 + 15 > dt->limit)
909
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910
    ptr = dt->base + intno * 16;
911
    e1 = ldl_kernel(ptr);
912
    e2 = ldl_kernel(ptr + 4);
913
    e3 = ldl_kernel(ptr + 8);
914
    /* check gate type */
915
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
916
    switch(type) {
917
    case 14: /* 386 interrupt gate */
918
    case 15: /* 386 trap gate */
919
        break;
920
    default:
921
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
922
        break;
923
    }
924
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925
    cpl = env->hflags & HF_CPL_MASK;
926
    /* check privilege if software int */
927
    if (is_int && dpl < cpl)
928
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
929
    /* check valid bit */
930
    if (!(e2 & DESC_P_MASK))
931
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
932
    selector = e1 >> 16;
933
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
934
    ist = e2 & 7;
935
    if ((selector & 0xfffc) == 0)
936
        raise_exception_err(EXCP0D_GPF, 0);
937

    
938
    if (load_segment(&e1, &e2, selector) != 0)
939
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
941
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
942
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
943
    if (dpl > cpl)
944
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945
    if (!(e2 & DESC_P_MASK))
946
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
947
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
948
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
949
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
950
        /* to inner privilege */
951
        if (ist != 0)
952
            esp = get_rsp_from_tss(ist + 3);
953
        else
954
            esp = get_rsp_from_tss(dpl);
955
        esp &= ~0xfLL; /* align stack */
956
        ss = 0;
957
        new_stack = 1;
958
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
959
        /* to same privilege */
960
        if (env->eflags & VM_MASK)
961
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962
        new_stack = 0;
963
        if (ist != 0)
964
            esp = get_rsp_from_tss(ist + 3);
965
        else
966
            esp = ESP;
967
        esp &= ~0xfLL; /* align stack */
968
        dpl = cpl;
969
    } else {
970
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
971
        new_stack = 0; /* avoid warning */
972
        esp = 0; /* avoid warning */
973
    }
974

    
975
    PUSHQ(esp, env->segs[R_SS].selector);
976
    PUSHQ(esp, ESP);
977
    PUSHQ(esp, compute_eflags());
978
    PUSHQ(esp, env->segs[R_CS].selector);
979
    PUSHQ(esp, old_eip);
980
    if (has_error_code) {
981
        PUSHQ(esp, error_code);
982
    }
983

    
984
    if (new_stack) {
985
        ss = 0 | dpl;
986
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
987
    }
988
    ESP = esp;
989

    
990
    selector = (selector & ~3) | dpl;
991
    cpu_x86_load_seg_cache(env, R_CS, selector,
992
                   get_seg_base(e1, e2),
993
                   get_seg_limit(e1, e2),
994
                   e2);
995
    cpu_x86_set_cpl(env, dpl);
996
    env->eip = offset;
997

    
998
    /* interrupt gate clear IF mask */
999
    if ((type & 1) == 0) {
1000
        env->eflags &= ~IF_MASK;
1001
    }
1002
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1003
}
1004
#endif
1005

    
1006
#ifdef TARGET_X86_64
1007
#if defined(CONFIG_USER_ONLY)
1008
void helper_syscall(int next_eip_addend)
1009
{
1010
    env->exception_index = EXCP_SYSCALL;
1011
    env->exception_next_eip = env->eip + next_eip_addend;
1012
    cpu_loop_exit();
1013
}
1014
#else
1015
void helper_syscall(int next_eip_addend)
1016
{
1017
    int selector;
1018

    
1019
    if (!(env->efer & MSR_EFER_SCE)) {
1020
        raise_exception_err(EXCP06_ILLOP, 0);
1021
    }
1022
    selector = (env->star >> 32) & 0xffff;
1023
    if (env->hflags & HF_LMA_MASK) {
1024
        int code64;
1025

    
1026
        ECX = env->eip + next_eip_addend;
1027
        env->regs[11] = compute_eflags();
1028

    
1029
        code64 = env->hflags & HF_CS64_MASK;
1030

    
1031
        cpu_x86_set_cpl(env, 0);
1032
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1033
                           0, 0xffffffff,
1034
                               DESC_G_MASK | DESC_P_MASK |
1035
                               DESC_S_MASK |
1036
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1037
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1038
                               0, 0xffffffff,
1039
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1040
                               DESC_S_MASK |
1041
                               DESC_W_MASK | DESC_A_MASK);
1042
        env->eflags &= ~env->fmask;
1043
        load_eflags(env->eflags, 0);
1044
        if (code64)
1045
            env->eip = env->lstar;
1046
        else
1047
            env->eip = env->cstar;
1048
    } else {
1049
        ECX = (uint32_t)(env->eip + next_eip_addend);
1050

    
1051
        cpu_x86_set_cpl(env, 0);
1052
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1053
                           0, 0xffffffff,
1054
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1055
                               DESC_S_MASK |
1056
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1057
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1058
                               0, 0xffffffff,
1059
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1060
                               DESC_S_MASK |
1061
                               DESC_W_MASK | DESC_A_MASK);
1062
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1063
        env->eip = (uint32_t)env->star;
1064
    }
1065
}
1066
#endif
1067
#endif
1068

    
1069
#ifdef TARGET_X86_64
1070
void helper_sysret(int dflag)
1071
{
1072
    int cpl, selector;
1073

    
1074
    if (!(env->efer & MSR_EFER_SCE)) {
1075
        raise_exception_err(EXCP06_ILLOP, 0);
1076
    }
1077
    cpl = env->hflags & HF_CPL_MASK;
1078
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1079
        raise_exception_err(EXCP0D_GPF, 0);
1080
    }
1081
    selector = (env->star >> 48) & 0xffff;
1082
    if (env->hflags & HF_LMA_MASK) {
1083
        if (dflag == 2) {
1084
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1085
                                   0, 0xffffffff,
1086
                                   DESC_G_MASK | DESC_P_MASK |
1087
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1089
                                   DESC_L_MASK);
1090
            env->eip = ECX;
1091
        } else {
1092
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1093
                                   0, 0xffffffff,
1094
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097
            env->eip = (uint32_t)ECX;
1098
        }
1099
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1100
                               0, 0xffffffff,
1101
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1102
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1103
                               DESC_W_MASK | DESC_A_MASK);
1104
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1105
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1106
        cpu_x86_set_cpl(env, 3);
1107
    } else {
1108
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1109
                               0, 0xffffffff,
1110
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1111
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1112
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1113
        env->eip = (uint32_t)ECX;
1114
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1115
                               0, 0xffffffff,
1116
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1117
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1118
                               DESC_W_MASK | DESC_A_MASK);
1119
        env->eflags |= IF_MASK;
1120
        cpu_x86_set_cpl(env, 3);
1121
    }
1122
#ifdef USE_KQEMU
1123
    if (kqemu_is_ok(env)) {
1124
        if (env->hflags & HF_LMA_MASK)
1125
            CC_OP = CC_OP_EFLAGS;
1126
        env->exception_index = -1;
1127
        cpu_loop_exit();
1128
    }
1129
#endif
1130
}
1131
#endif
1132

    
1133
/* real mode interrupt */
1134
static void do_interrupt_real(int intno, int is_int, int error_code,
1135
                              unsigned int next_eip)
1136
{
1137
    SegmentCache *dt;
1138
    target_ulong ptr, ssp;
1139
    int selector;
1140
    uint32_t offset, esp;
1141
    uint32_t old_cs, old_eip;
1142

    
1143
    /* real mode (simpler !) */
1144
    dt = &env->idt;
1145
    if (intno * 4 + 3 > dt->limit)
1146
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1147
    ptr = dt->base + intno * 4;
1148
    offset = lduw_kernel(ptr);
1149
    selector = lduw_kernel(ptr + 2);
1150
    esp = ESP;
1151
    ssp = env->segs[R_SS].base;
1152
    if (is_int)
1153
        old_eip = next_eip;
1154
    else
1155
        old_eip = env->eip;
1156
    old_cs = env->segs[R_CS].selector;
1157
    /* XXX: use SS segment size ? */
1158
    PUSHW(ssp, esp, 0xffff, compute_eflags());
1159
    PUSHW(ssp, esp, 0xffff, old_cs);
1160
    PUSHW(ssp, esp, 0xffff, old_eip);
1161

    
1162
    /* update processor state */
1163
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
1164
    env->eip = offset;
1165
    env->segs[R_CS].selector = selector;
1166
    env->segs[R_CS].base = (selector << 4);
1167
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1168
}
1169

    
1170
/* fake user mode interrupt */
1171
void do_interrupt_user(int intno, int is_int, int error_code,
1172
                       target_ulong next_eip)
1173
{
1174
    SegmentCache *dt;
1175
    target_ulong ptr;
1176
    int dpl, cpl, shift;
1177
    uint32_t e2;
1178

    
1179
    dt = &env->idt;
1180
    if (env->hflags & HF_LMA_MASK) {
1181
        shift = 4;
1182
    } else {
1183
        shift = 3;
1184
    }
1185
    ptr = dt->base + (intno << shift);
1186
    e2 = ldl_kernel(ptr + 4);
1187

    
1188
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1189
    cpl = env->hflags & HF_CPL_MASK;
1190
    /* check privilege if software int */
1191
    if (is_int && dpl < cpl)
1192
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1193

    
1194
    /* Since we emulate only user space, we cannot do more than
1195
       exiting the emulation with the suitable exception and error
1196
       code */
1197
    if (is_int)
1198
        EIP = next_eip;
1199
}
1200

    
1201
/*
1202
 * Begin execution of an interruption. is_int is TRUE if coming from
1203
 * the int instruction. next_eip is the EIP value AFTER the interrupt
1204
 * instruction. It is only relevant if is_int is TRUE.
1205
 */
1206
void do_interrupt(int intno, int is_int, int error_code,
1207
                  target_ulong next_eip, int is_hw)
1208
{
1209
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210
        if ((env->cr[0] & CR0_PE_MASK)) {
1211
            static int count;
1212
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1213
                    count, intno, error_code, is_int,
1214
                    env->hflags & HF_CPL_MASK,
1215
                    env->segs[R_CS].selector, EIP,
1216
                    (int)env->segs[R_CS].base + EIP,
1217
                    env->segs[R_SS].selector, ESP);
1218
            if (intno == 0x0e) {
1219
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1220
            } else {
1221
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1222
            }
1223
            qemu_log("\n");
1224
            log_cpu_state(env, X86_DUMP_CCOP);
1225
#if 0
1226
            {
1227
                int i;
1228
                uint8_t *ptr;
1229
                qemu_log("       code=");
1230
                ptr = env->segs[R_CS].base + env->eip;
1231
                for(i = 0; i < 16; i++) {
1232
                    qemu_log(" %02x", ldub(ptr + i));
1233
                }
1234
                qemu_log("\n");
1235
            }
1236
#endif
1237
            count++;
1238
        }
1239
    }
1240
    if (env->cr[0] & CR0_PE_MASK) {
1241
#ifdef TARGET_X86_64
1242
        if (env->hflags & HF_LMA_MASK) {
1243
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1244
        } else
1245
#endif
1246
        {
1247
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1248
        }
1249
    } else {
1250
        do_interrupt_real(intno, is_int, error_code, next_eip);
1251
    }
1252
}
1253

    
1254
/* This should come from sysemu.h - if we could include it here... */
1255
void qemu_system_reset_request(void);
1256

    
1257
/*
1258
 * Check nested exceptions and change to double or triple fault if
1259
 * needed. It should only be called, if this is not an interrupt.
1260
 * Returns the new exception number.
1261
 */
1262
static int check_exception(int intno, int *error_code)
1263
{
1264
    int first_contributory = env->old_exception == 0 ||
1265
                              (env->old_exception >= 10 &&
1266
                               env->old_exception <= 13);
1267
    int second_contributory = intno == 0 ||
1268
                               (intno >= 10 && intno <= 13);
1269

    
1270
    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1271
                env->old_exception, intno);
1272

    
1273
#if !defined(CONFIG_USER_ONLY)
1274
    if (env->old_exception == EXCP08_DBLE) {
1275
        if (env->hflags & HF_SVMI_MASK)
1276
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1277

    
1278
        if (qemu_loglevel_mask(CPU_LOG_RESET))
1279
            fprintf(logfile, "Triple fault\n");
1280

    
1281
        qemu_system_reset_request();
1282
        return EXCP_HLT;
1283
    }
1284
#endif
1285

    
1286
    if ((first_contributory && second_contributory)
1287
        || (env->old_exception == EXCP0E_PAGE &&
1288
            (second_contributory || (intno == EXCP0E_PAGE)))) {
1289
        intno = EXCP08_DBLE;
1290
        *error_code = 0;
1291
    }
1292

    
1293
    if (second_contributory || (intno == EXCP0E_PAGE) ||
1294
        (intno == EXCP08_DBLE))
1295
        env->old_exception = intno;
1296

    
1297
    return intno;
1298
}
1299

    
1300
/*
1301
 * Signal an interruption. It is executed in the main CPU loop.
1302
 * is_int is TRUE if coming from the int instruction. next_eip is the
1303
 * EIP value AFTER the interrupt instruction. It is only relevant if
1304
 * is_int is TRUE.
1305
 */
1306
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1307
                                          int next_eip_addend)
1308
{
1309
    if (!is_int) {
1310
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1311
        intno = check_exception(intno, &error_code);
1312
    } else {
1313
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1314
    }
1315

    
1316
    env->exception_index = intno;
1317
    env->error_code = error_code;
1318
    env->exception_is_int = is_int;
1319
    env->exception_next_eip = env->eip + next_eip_addend;
1320
    cpu_loop_exit();
1321
}
1322

    
1323
/* shortcuts to generate exceptions */
1324

    
1325
void raise_exception_err(int exception_index, int error_code)
1326
{
1327
    raise_interrupt(exception_index, 0, error_code, 0);
1328
}
1329

    
1330
void raise_exception(int exception_index)
1331
{
1332
    raise_interrupt(exception_index, 0, 0, 0);
1333
}
1334

    
1335
/* SMM support */
1336

    
1337
#if defined(CONFIG_USER_ONLY)
1338

    
1339
void do_smm_enter(void)
1340
{
1341
}
1342

    
1343
void helper_rsm(void)
1344
{
1345
}
1346

    
1347
#else
1348

    
1349
#ifdef TARGET_X86_64
1350
#define SMM_REVISION_ID 0x00020064
1351
#else
1352
#define SMM_REVISION_ID 0x00020000
1353
#endif
1354

    
1355
void do_smm_enter(void)
1356
{
1357
    target_ulong sm_state;
1358
    SegmentCache *dt;
1359
    int i, offset;
1360

    
1361
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1362
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1363

    
1364
    env->hflags |= HF_SMM_MASK;
1365
    cpu_smm_update(env);
1366

    
1367
    sm_state = env->smbase + 0x8000;
1368

    
1369
#ifdef TARGET_X86_64
1370
    for(i = 0; i < 6; i++) {
1371
        dt = &env->segs[i];
1372
        offset = 0x7e00 + i * 16;
1373
        stw_phys(sm_state + offset, dt->selector);
1374
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1375
        stl_phys(sm_state + offset + 4, dt->limit);
1376
        stq_phys(sm_state + offset + 8, dt->base);
1377
    }
1378

    
1379
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1380
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1381

    
1382
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1383
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1384
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1385
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1386

    
1387
    stq_phys(sm_state + 0x7e88, env->idt.base);
1388
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1389

    
1390
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1391
    stq_phys(sm_state + 0x7e98, env->tr.base);
1392
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1393
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1394

    
1395
    stq_phys(sm_state + 0x7ed0, env->efer);
1396

    
1397
    stq_phys(sm_state + 0x7ff8, EAX);
1398
    stq_phys(sm_state + 0x7ff0, ECX);
1399
    stq_phys(sm_state + 0x7fe8, EDX);
1400
    stq_phys(sm_state + 0x7fe0, EBX);
1401
    stq_phys(sm_state + 0x7fd8, ESP);
1402
    stq_phys(sm_state + 0x7fd0, EBP);
1403
    stq_phys(sm_state + 0x7fc8, ESI);
1404
    stq_phys(sm_state + 0x7fc0, EDI);
1405
    for(i = 8; i < 16; i++)
1406
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1407
    stq_phys(sm_state + 0x7f78, env->eip);
1408
    stl_phys(sm_state + 0x7f70, compute_eflags());
1409
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1410
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1411

    
1412
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1413
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1414
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1415

    
1416
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1417
    stl_phys(sm_state + 0x7f00, env->smbase);
1418
#else
1419
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1420
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1421
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1422
    stl_phys(sm_state + 0x7ff0, env->eip);
1423
    stl_phys(sm_state + 0x7fec, EDI);
1424
    stl_phys(sm_state + 0x7fe8, ESI);
1425
    stl_phys(sm_state + 0x7fe4, EBP);
1426
    stl_phys(sm_state + 0x7fe0, ESP);
1427
    stl_phys(sm_state + 0x7fdc, EBX);
1428
    stl_phys(sm_state + 0x7fd8, EDX);
1429
    stl_phys(sm_state + 0x7fd4, ECX);
1430
    stl_phys(sm_state + 0x7fd0, EAX);
1431
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1432
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1433

    
1434
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1435
    stl_phys(sm_state + 0x7f64, env->tr.base);
1436
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1437
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1438

    
1439
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1440
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1441
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1442
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1443

    
1444
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1445
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1446

    
1447
    stl_phys(sm_state + 0x7f58, env->idt.base);
1448
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1449

    
1450
    for(i = 0; i < 6; i++) {
1451
        dt = &env->segs[i];
1452
        if (i < 3)
1453
            offset = 0x7f84 + i * 12;
1454
        else
1455
            offset = 0x7f2c + (i - 3) * 12;
1456
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1457
        stl_phys(sm_state + offset + 8, dt->base);
1458
        stl_phys(sm_state + offset + 4, dt->limit);
1459
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1460
    }
1461
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1462

    
1463
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1464
    stl_phys(sm_state + 0x7ef8, env->smbase);
1465
#endif
1466
    /* init SMM cpu state */
1467

    
1468
#ifdef TARGET_X86_64
1469
    cpu_load_efer(env, 0);
1470
#endif
1471
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1472
    env->eip = 0x00008000;
1473
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1474
                           0xffffffff, 0);
1475
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1476
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1477
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1478
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1479
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1480

    
1481
    cpu_x86_update_cr0(env,
1482
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1483
    cpu_x86_update_cr4(env, 0);
1484
    env->dr[7] = 0x00000400;
1485
    CC_OP = CC_OP_EFLAGS;
1486
}
1487

    
1488
void helper_rsm(void)
1489
{
1490
    target_ulong sm_state;
1491
    int i, offset;
1492
    uint32_t val;
1493

    
1494
    sm_state = env->smbase + 0x8000;
1495
#ifdef TARGET_X86_64
1496
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1497

    
1498
    for(i = 0; i < 6; i++) {
1499
        offset = 0x7e00 + i * 16;
1500
        cpu_x86_load_seg_cache(env, i,
1501
                               lduw_phys(sm_state + offset),
1502
                               ldq_phys(sm_state + offset + 8),
1503
                               ldl_phys(sm_state + offset + 4),
1504
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1505
    }
1506

    
1507
    env->gdt.base = ldq_phys(sm_state + 0x7e68);
1508
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1509

    
1510
    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1511
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
1512
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1513
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1514

    
1515
    env->idt.base = ldq_phys(sm_state + 0x7e88);
1516
    env->idt.limit = ldl_phys(sm_state + 0x7e84);
1517

    
1518
    env->tr.selector = lduw_phys(sm_state + 0x7e90);
1519
    env->tr.base = ldq_phys(sm_state + 0x7e98);
1520
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
1521
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1522

    
1523
    EAX = ldq_phys(sm_state + 0x7ff8);
1524
    ECX = ldq_phys(sm_state + 0x7ff0);
1525
    EDX = ldq_phys(sm_state + 0x7fe8);
1526
    EBX = ldq_phys(sm_state + 0x7fe0);
1527
    ESP = ldq_phys(sm_state + 0x7fd8);
1528
    EBP = ldq_phys(sm_state + 0x7fd0);
1529
    ESI = ldq_phys(sm_state + 0x7fc8);
1530
    EDI = ldq_phys(sm_state + 0x7fc0);
1531
    for(i = 8; i < 16; i++)
1532
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1533
    env->eip = ldq_phys(sm_state + 0x7f78);
1534
    load_eflags(ldl_phys(sm_state + 0x7f70),
1535
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1536
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
1537
    env->dr[7] = ldl_phys(sm_state + 0x7f60);
1538

    
1539
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1540
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1541
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1542

    
1543
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1544
    if (val & 0x20000) {
1545
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1546
    }
1547
#else
1548
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1549
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1550
    load_eflags(ldl_phys(sm_state + 0x7ff4),
1551
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1552
    env->eip = ldl_phys(sm_state + 0x7ff0);
1553
    EDI = ldl_phys(sm_state + 0x7fec);
1554
    ESI = ldl_phys(sm_state + 0x7fe8);
1555
    EBP = ldl_phys(sm_state + 0x7fe4);
1556
    ESP = ldl_phys(sm_state + 0x7fe0);
1557
    EBX = ldl_phys(sm_state + 0x7fdc);
1558
    EDX = ldl_phys(sm_state + 0x7fd8);
1559
    ECX = ldl_phys(sm_state + 0x7fd4);
1560
    EAX = ldl_phys(sm_state + 0x7fd0);
1561
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1562
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1563

    
1564
    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1565
    env->tr.base = ldl_phys(sm_state + 0x7f64);
1566
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
1567
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1568

    
1569
    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1570
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
1571
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1572
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1573

    
1574
    env->gdt.base = ldl_phys(sm_state + 0x7f74);
1575
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1576

    
1577
    env->idt.base = ldl_phys(sm_state + 0x7f58);
1578
    env->idt.limit = ldl_phys(sm_state + 0x7f54);
1579

    
1580
    for(i = 0; i < 6; i++) {
1581
        if (i < 3)
1582
            offset = 0x7f84 + i * 12;
1583
        else
1584
            offset = 0x7f2c + (i - 3) * 12;
1585
        cpu_x86_load_seg_cache(env, i,
1586
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1587
                               ldl_phys(sm_state + offset + 8),
1588
                               ldl_phys(sm_state + offset + 4),
1589
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1590
    }
1591
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1592

    
1593
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1594
    if (val & 0x20000) {
1595
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1596
    }
1597
#endif
1598
    CC_OP = CC_OP_EFLAGS;
1599
    env->hflags &= ~HF_SMM_MASK;
1600
    cpu_smm_update(env);
1601

    
1602
    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1603
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1604
}
1605

    
1606
#endif /* !CONFIG_USER_ONLY */
1607

    
1608

    
1609
/* division, flags are undefined */
1610

    
1611
void helper_divb_AL(target_ulong t0)
1612
{
1613
    unsigned int num, den, q, r;
1614

    
1615
    num = (EAX & 0xffff);
1616
    den = (t0 & 0xff);
1617
    if (den == 0) {
1618
        raise_exception(EXCP00_DIVZ);
1619
    }
1620
    q = (num / den);
1621
    if (q > 0xff)
1622
        raise_exception(EXCP00_DIVZ);
1623
    q &= 0xff;
1624
    r = (num % den) & 0xff;
1625
    EAX = (EAX & ~0xffff) | (r << 8) | q;
1626
}
1627

    
1628
void helper_idivb_AL(target_ulong t0)
1629
{
1630
    int num, den, q, r;
1631

    
1632
    num = (int16_t)EAX;
1633
    den = (int8_t)t0;
1634
    if (den == 0) {
1635
        raise_exception(EXCP00_DIVZ);
1636
    }
1637
    q = (num / den);
1638
    if (q != (int8_t)q)
1639
        raise_exception(EXCP00_DIVZ);
1640
    q &= 0xff;
1641
    r = (num % den) & 0xff;
1642
    EAX = (EAX & ~0xffff) | (r << 8) | q;
1643
}
1644

    
1645
void helper_divw_AX(target_ulong t0)
1646
{
1647
    unsigned int num, den, q, r;
1648

    
1649
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1650
    den = (t0 & 0xffff);
1651
    if (den == 0) {
1652
        raise_exception(EXCP00_DIVZ);
1653
    }
1654
    q = (num / den);
1655
    if (q > 0xffff)
1656
        raise_exception(EXCP00_DIVZ);
1657
    q &= 0xffff;
1658
    r = (num % den) & 0xffff;
1659
    EAX = (EAX & ~0xffff) | q;
1660
    EDX = (EDX & ~0xffff) | r;
1661
}
1662

    
1663
void helper_idivw_AX(target_ulong t0)
1664
{
1665
    int num, den, q, r;
1666

    
1667
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1668
    den = (int16_t)t0;
1669
    if (den == 0) {
1670
        raise_exception(EXCP00_DIVZ);
1671
    }
1672
    q = (num / den);
1673
    if (q != (int16_t)q)
1674
        raise_exception(EXCP00_DIVZ);
1675
    q &= 0xffff;
1676
    r = (num % den) & 0xffff;
1677
    EAX = (EAX & ~0xffff) | q;
1678
    EDX = (EDX & ~0xffff) | r;
1679
}
1680

    
1681
void helper_divl_EAX(target_ulong t0)
1682
{
1683
    unsigned int den, r;
1684
    uint64_t num, q;
1685

    
1686
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1687
    den = t0;
1688
    if (den == 0) {
1689
        raise_exception(EXCP00_DIVZ);
1690
    }
1691
    q = (num / den);
1692
    r = (num % den);
1693
    if (q > 0xffffffff)
1694
        raise_exception(EXCP00_DIVZ);
1695
    EAX = (uint32_t)q;
1696
    EDX = (uint32_t)r;
1697
}
1698

    
1699
void helper_idivl_EAX(target_ulong t0)
1700
{
1701
    int den, r;
1702
    int64_t num, q;
1703

    
1704
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1705
    den = t0;
1706
    if (den == 0) {
1707
        raise_exception(EXCP00_DIVZ);
1708
    }
1709
    q = (num / den);
1710
    r = (num % den);
1711
    if (q != (int32_t)q)
1712
        raise_exception(EXCP00_DIVZ);
1713
    EAX = (uint32_t)q;
1714
    EDX = (uint32_t)r;
1715
}
1716

    
1717
/* bcd */
1718

    
1719
/* XXX: exception */
1720
void helper_aam(int base)
1721
{
1722
    int al, ah;
1723
    al = EAX & 0xff;
1724
    ah = al / base;
1725
    al = al % base;
1726
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1727
    CC_DST = al;
1728
}
1729

    
1730
void helper_aad(int base)
1731
{
1732
    int al, ah;
1733
    al = EAX & 0xff;
1734
    ah = (EAX >> 8) & 0xff;
1735
    al = ((ah * base) + al) & 0xff;
1736
    EAX = (EAX & ~0xffff) | al;
1737
    CC_DST = al;
1738
}
1739

    
1740
void helper_aaa(void)
1741
{
1742
    int icarry;
1743
    int al, ah, af;
1744
    int eflags;
1745

    
1746
    eflags = helper_cc_compute_all(CC_OP);
1747
    af = eflags & CC_A;
1748
    al = EAX & 0xff;
1749
    ah = (EAX >> 8) & 0xff;
1750

    
1751
    icarry = (al > 0xf9);
1752
    if (((al & 0x0f) > 9 ) || af) {
1753
        al = (al + 6) & 0x0f;
1754
        ah = (ah + 1 + icarry) & 0xff;
1755
        eflags |= CC_C | CC_A;
1756
    } else {
1757
        eflags &= ~(CC_C | CC_A);
1758
        al &= 0x0f;
1759
    }
1760
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1761
    CC_SRC = eflags;
1762
}
1763

    
1764
void helper_aas(void)
1765
{
1766
    int icarry;
1767
    int al, ah, af;
1768
    int eflags;
1769

    
1770
    eflags = helper_cc_compute_all(CC_OP);
1771
    af = eflags & CC_A;
1772
    al = EAX & 0xff;
1773
    ah = (EAX >> 8) & 0xff;
1774

    
1775
    icarry = (al < 6);
1776
    if (((al & 0x0f) > 9 ) || af) {
1777
        al = (al - 6) & 0x0f;
1778
        ah = (ah - 1 - icarry) & 0xff;
1779
        eflags |= CC_C | CC_A;
1780
    } else {
1781
        eflags &= ~(CC_C | CC_A);
1782
        al &= 0x0f;
1783
    }
1784
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1785
    CC_SRC = eflags;
1786
}
1787

    
1788
void helper_daa(void)
1789
{
1790
    int al, af, cf;
1791
    int eflags;
1792

    
1793
    eflags = helper_cc_compute_all(CC_OP);
1794
    cf = eflags & CC_C;
1795
    af = eflags & CC_A;
1796
    al = EAX & 0xff;
1797

    
1798
    eflags = 0;
1799
    if (((al & 0x0f) > 9 ) || af) {
1800
        al = (al + 6) & 0xff;
1801
        eflags |= CC_A;
1802
    }
1803
    if ((al > 0x9f) || cf) {
1804
        al = (al + 0x60) & 0xff;
1805
        eflags |= CC_C;
1806
    }
1807
    EAX = (EAX & ~0xff) | al;
1808
    /* well, speed is not an issue here, so we compute the flags by hand */
1809
    eflags |= (al == 0) << 6; /* zf */
1810
    eflags |= parity_table[al]; /* pf */
1811
    eflags |= (al & 0x80); /* sf */
1812
    CC_SRC = eflags;
1813
}
1814

    
1815
void helper_das(void)
1816
{
1817
    int al, al1, af, cf;
1818
    int eflags;
1819

    
1820
    eflags = helper_cc_compute_all(CC_OP);
1821
    cf = eflags & CC_C;
1822
    af = eflags & CC_A;
1823
    al = EAX & 0xff;
1824

    
1825
    eflags = 0;
1826
    al1 = al;
1827
    if (((al & 0x0f) > 9 ) || af) {
1828
        eflags |= CC_A;
1829
        if (al < 6 || cf)
1830
            eflags |= CC_C;
1831
        al = (al - 6) & 0xff;
1832
    }
1833
    if ((al1 > 0x99) || cf) {
1834
        al = (al - 0x60) & 0xff;
1835
        eflags |= CC_C;
1836
    }
1837
    EAX = (EAX & ~0xff) | al;
1838
    /* well, speed is not an issue here, so we compute the flags by hand */
1839
    eflags |= (al == 0) << 6; /* zf */
1840
    eflags |= parity_table[al]; /* pf */
1841
    eflags |= (al & 0x80); /* sf */
1842
    CC_SRC = eflags;
1843
}
1844

    
1845
void helper_into(int next_eip_addend)
1846
{
1847
    int eflags;
1848
    eflags = helper_cc_compute_all(CC_OP);
1849
    if (eflags & CC_O) {
1850
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1851
    }
1852
}
1853

    
1854
void helper_cmpxchg8b(target_ulong a0)
1855
{
1856
    uint64_t d;
1857
    int eflags;
1858

    
1859
    eflags = helper_cc_compute_all(CC_OP);
1860
    d = ldq(a0);
1861
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1862
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1863
        eflags |= CC_Z;
1864
    } else {
1865
        /* always do the store */
1866
        stq(a0, d); 
1867
        EDX = (uint32_t)(d >> 32);
1868
        EAX = (uint32_t)d;
1869
        eflags &= ~CC_Z;
1870
    }
1871
    CC_SRC = eflags;
1872
}
1873

    
1874
#ifdef TARGET_X86_64
1875
void helper_cmpxchg16b(target_ulong a0)
1876
{
1877
    uint64_t d0, d1;
1878
    int eflags;
1879

    
1880
    if ((a0 & 0xf) != 0)
1881
        raise_exception(EXCP0D_GPF);
1882
    eflags = helper_cc_compute_all(CC_OP);
1883
    d0 = ldq(a0);
1884
    d1 = ldq(a0 + 8);
1885
    if (d0 == EAX && d1 == EDX) {
1886
        stq(a0, EBX);
1887
        stq(a0 + 8, ECX);
1888
        eflags |= CC_Z;
1889
    } else {
1890
        /* always do the store */
1891
        stq(a0, d0); 
1892
        stq(a0 + 8, d1); 
1893
        EDX = d1;
1894
        EAX = d0;
1895
        eflags &= ~CC_Z;
1896
    }
1897
    CC_SRC = eflags;
1898
}
1899
#endif
1900

    
1901
void helper_single_step(void)
1902
{
1903
#ifndef CONFIG_USER_ONLY
1904
    check_hw_breakpoints(env, 1);
1905
    env->dr[6] |= DR6_BS;
1906
#endif
1907
    raise_exception(EXCP01_DB);
1908
}
1909

    
1910
void helper_cpuid(void)
1911
{
1912
    uint32_t eax, ebx, ecx, edx;
1913

    
1914
    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1915

    
1916
    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
1917
    EAX = eax;
1918
    EBX = ebx;
1919
    ECX = ecx;
1920
    EDX = edx;
1921
}
1922

    
1923
void helper_enter_level(int level, int data32, target_ulong t1)
1924
{
1925
    target_ulong ssp;
1926
    uint32_t esp_mask, esp, ebp;
1927

    
1928
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
1929
    ssp = env->segs[R_SS].base;
1930
    ebp = EBP;
1931
    esp = ESP;
1932
    if (data32) {
1933
        /* 32 bit */
1934
        esp -= 4;
1935
        while (--level) {
1936
            esp -= 4;
1937
            ebp -= 4;
1938
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1939
        }
1940
        esp -= 4;
1941
        stl(ssp + (esp & esp_mask), t1);
1942
    } else {
1943
        /* 16 bit */
1944
        esp -= 2;
1945
        while (--level) {
1946
            esp -= 2;
1947
            ebp -= 2;
1948
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1949
        }
1950
        esp -= 2;
1951
        stw(ssp + (esp & esp_mask), t1);
1952
    }
1953
}
1954

    
1955
#ifdef TARGET_X86_64
1956
void helper_enter64_level(int level, int data64, target_ulong t1)
1957
{
1958
    target_ulong esp, ebp;
1959
    ebp = EBP;
1960
    esp = ESP;
1961

    
1962
    if (data64) {
1963
        /* 64 bit */
1964
        esp -= 8;
1965
        while (--level) {
1966
            esp -= 8;
1967
            ebp -= 8;
1968
            stq(esp, ldq(ebp));
1969
        }
1970
        esp -= 8;
1971
        stq(esp, t1);
1972
    } else {
1973
        /* 16 bit */
1974
        esp -= 2;
1975
        while (--level) {
1976
            esp -= 2;
1977
            ebp -= 2;
1978
            stw(esp, lduw(ebp));
1979
        }
1980
        esp -= 2;
1981
        stw(esp, t1);
1982
    }
1983
}
1984
#endif
1985

    
1986
void helper_lldt(int selector)
1987
{
1988
    SegmentCache *dt;
1989
    uint32_t e1, e2;
1990
    int index, entry_limit;
1991
    target_ulong ptr;
1992

    
1993
    selector &= 0xffff;
1994
    if ((selector & 0xfffc) == 0) {
1995
        /* XXX: NULL selector case: invalid LDT */
1996
        env->ldt.base = 0;
1997
        env->ldt.limit = 0;
1998
    } else {
1999
        if (selector & 0x4)
2000
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2001
        dt = &env->gdt;
2002
        index = selector & ~7;
2003
#ifdef TARGET_X86_64
2004
        if (env->hflags & HF_LMA_MASK)
2005
            entry_limit = 15;
2006
        else
2007
#endif
2008
            entry_limit = 7;
2009
        if ((index + entry_limit) > dt->limit)
2010
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2011
        ptr = dt->base + index;
2012
        e1 = ldl_kernel(ptr);
2013
        e2 = ldl_kernel(ptr + 4);
2014
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2015
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2016
        if (!(e2 & DESC_P_MASK))
2017
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2018
#ifdef TARGET_X86_64
2019
        if (env->hflags & HF_LMA_MASK) {
2020
            uint32_t e3;
2021
            e3 = ldl_kernel(ptr + 8);
2022
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2023
            env->ldt.base |= (target_ulong)e3 << 32;
2024
        } else
2025
#endif
2026
        {
2027
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2028
        }
2029
    }
2030
    env->ldt.selector = selector;
2031
}
2032

    
2033
void helper_ltr(int selector)
2034
{
2035
    SegmentCache *dt;
2036
    uint32_t e1, e2;
2037
    int index, type, entry_limit;
2038
    target_ulong ptr;
2039

    
2040
    selector &= 0xffff;
2041
    if ((selector & 0xfffc) == 0) {
2042
        /* NULL selector case: invalid TR */
2043
        env->tr.base = 0;
2044
        env->tr.limit = 0;
2045
        env->tr.flags = 0;
2046
    } else {
2047
        if (selector & 0x4)
2048
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2049
        dt = &env->gdt;
2050
        index = selector & ~7;
2051
#ifdef TARGET_X86_64
2052
        if (env->hflags & HF_LMA_MASK)
2053
            entry_limit = 15;
2054
        else
2055
#endif
2056
            entry_limit = 7;
2057
        if ((index + entry_limit) > dt->limit)
2058
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2059
        ptr = dt->base + index;
2060
        e1 = ldl_kernel(ptr);
2061
        e2 = ldl_kernel(ptr + 4);
2062
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2063
        if ((e2 & DESC_S_MASK) ||
2064
            (type != 1 && type != 9))
2065
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2066
        if (!(e2 & DESC_P_MASK))
2067
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2068
#ifdef TARGET_X86_64
2069
        if (env->hflags & HF_LMA_MASK) {
2070
            uint32_t e3, e4;
2071
            e3 = ldl_kernel(ptr + 8);
2072
            e4 = ldl_kernel(ptr + 12);
2073
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2074
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2075
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2076
            env->tr.base |= (target_ulong)e3 << 32;
2077
        } else
2078
#endif
2079
        {
2080
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2081
        }
2082
        e2 |= DESC_TSS_BUSY_MASK;
2083
        stl_kernel(ptr + 4, e2);
2084
    }
2085
    env->tr.selector = selector;
2086
}
2087

    
2088
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2089
void helper_load_seg(int seg_reg, int selector)
2090
{
2091
    uint32_t e1, e2;
2092
    int cpl, dpl, rpl;
2093
    SegmentCache *dt;
2094
    int index;
2095
    target_ulong ptr;
2096

    
2097
    selector &= 0xffff;
2098
    cpl = env->hflags & HF_CPL_MASK;
2099
    if ((selector & 0xfffc) == 0) {
2100
        /* null selector case */
2101
        if (seg_reg == R_SS
2102
#ifdef TARGET_X86_64
2103
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2104
#endif
2105
            )
2106
            raise_exception_err(EXCP0D_GPF, 0);
2107
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2108
    } else {
2109

    
2110
        if (selector & 0x4)
2111
            dt = &env->ldt;
2112
        else
2113
            dt = &env->gdt;
2114
        index = selector & ~7;
2115
        if ((index + 7) > dt->limit)
2116
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2117
        ptr = dt->base + index;
2118
        e1 = ldl_kernel(ptr);
2119
        e2 = ldl_kernel(ptr + 4);
2120

    
2121
        if (!(e2 & DESC_S_MASK))
2122
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2123
        rpl = selector & 3;
2124
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2125
        if (seg_reg == R_SS) {
2126
            /* must be writable segment */
2127
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2128
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2129
            if (rpl != cpl || dpl != cpl)
2130
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131
        } else {
2132
            /* must be readable segment */
2133
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2134
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2135

    
2136
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2137
                /* if not conforming code, test rights */
2138
                if (dpl < cpl || dpl < rpl)
2139
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2140
            }
2141
        }
2142

    
2143
        if (!(e2 & DESC_P_MASK)) {
2144
            if (seg_reg == R_SS)
2145
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2146
            else
2147
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2148
        }
2149

    
2150
        /* set the access bit if not already set */
2151
        if (!(e2 & DESC_A_MASK)) {
2152
            e2 |= DESC_A_MASK;
2153
            stl_kernel(ptr + 4, e2);
2154
        }
2155

    
2156
        cpu_x86_load_seg_cache(env, seg_reg, selector,
2157
                       get_seg_base(e1, e2),
2158
                       get_seg_limit(e1, e2),
2159
                       e2);
2160
#if 0
2161
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2162
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
2163
#endif
2164
    }
2165
}
2166

    
2167
/* protected mode jump */
2168
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2169
                           int next_eip_addend)
2170
{
2171
    int gate_cs, type;
2172
    uint32_t e1, e2, cpl, dpl, rpl, limit;
2173
    target_ulong next_eip;
2174

    
2175
    if ((new_cs & 0xfffc) == 0)
2176
        raise_exception_err(EXCP0D_GPF, 0);
2177
    if (load_segment(&e1, &e2, new_cs) != 0)
2178
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2179
    cpl = env->hflags & HF_CPL_MASK;
2180
    if (e2 & DESC_S_MASK) {
2181
        if (!(e2 & DESC_CS_MASK))
2182
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2183
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2184
        if (e2 & DESC_C_MASK) {
2185
            /* conforming code segment */
2186
            if (dpl > cpl)
2187
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2188
        } else {
2189
            /* non conforming code segment */
2190
            rpl = new_cs & 3;
2191
            if (rpl > cpl)
2192
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2193
            if (dpl != cpl)
2194
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2195
        }
2196
        if (!(e2 & DESC_P_MASK))
2197
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2198
        limit = get_seg_limit(e1, e2);
2199
        if (new_eip > limit &&
2200
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2201
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2202
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2203
                       get_seg_base(e1, e2), limit, e2);
2204
        EIP = new_eip;
2205
    } else {
2206
        /* jump to call or task gate */
2207
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2208
        rpl = new_cs & 3;
2209
        cpl = env->hflags & HF_CPL_MASK;
2210
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2211
        switch(type) {
2212
        case 1: /* 286 TSS */
2213
        case 9: /* 386 TSS */
2214
        case 5: /* task gate */
2215
            if (dpl < cpl || dpl < rpl)
2216
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217
            next_eip = env->eip + next_eip_addend;
2218
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2219
            CC_OP = CC_OP_EFLAGS;
2220
            break;
2221
        case 4: /* 286 call gate */
2222
        case 12: /* 386 call gate */
2223
            if ((dpl < cpl) || (dpl < rpl))
2224
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225
            if (!(e2 & DESC_P_MASK))
2226
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2227
            gate_cs = e1 >> 16;
2228
            new_eip = (e1 & 0xffff);
2229
            if (type == 12)
2230
                new_eip |= (e2 & 0xffff0000);
2231
            if (load_segment(&e1, &e2, gate_cs) != 0)
2232
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2233
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2234
            /* must be code segment */
2235
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2236
                 (DESC_S_MASK | DESC_CS_MASK)))
2237
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2238
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2239
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2240
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2241
            if (!(e2 & DESC_P_MASK))
2242
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2243
            limit = get_seg_limit(e1, e2);
2244
            if (new_eip > limit)
2245
                raise_exception_err(EXCP0D_GPF, 0);
2246
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2247
                                   get_seg_base(e1, e2), limit, e2);
2248
            EIP = new_eip;
2249
            break;
2250
        default:
2251
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2252
            break;
2253
        }
2254
    }
2255
}
2256

    
2257
/* real mode call */
2258
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2259
                       int shift, int next_eip)
2260
{
2261
    int new_eip;
2262
    uint32_t esp, esp_mask;
2263
    target_ulong ssp;
2264

    
2265
    new_eip = new_eip1;
2266
    esp = ESP;
2267
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2268
    ssp = env->segs[R_SS].base;
2269
    if (shift) {
2270
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2271
        PUSHL(ssp, esp, esp_mask, next_eip);
2272
    } else {
2273
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2274
        PUSHW(ssp, esp, esp_mask, next_eip);
2275
    }
2276

    
2277
    SET_ESP(esp, esp_mask);
2278
    env->eip = new_eip;
2279
    env->segs[R_CS].selector = new_cs;
2280
    env->segs[R_CS].base = (new_cs << 4);
2281
}
2282

    
2283
/* protected mode call */
2284
void helper_lcall_protected(int new_cs, target_ulong new_eip, 
2285
                            int shift, int next_eip_addend)
2286
{
2287
    int new_stack, i;
2288
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2289
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2290
    uint32_t val, limit, old_sp_mask;
2291
    target_ulong ssp, old_ssp, next_eip;
2292

    
2293
    next_eip = env->eip + next_eip_addend;
2294
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2295
    LOG_PCALL_STATE(env);
2296
    if ((new_cs & 0xfffc) == 0)
2297
        raise_exception_err(EXCP0D_GPF, 0);
2298
    if (load_segment(&e1, &e2, new_cs) != 0)
2299
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2300
    cpl = env->hflags & HF_CPL_MASK;
2301
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2302
    if (e2 & DESC_S_MASK) {
2303
        if (!(e2 & DESC_CS_MASK))
2304
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2305
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2306
        if (e2 & DESC_C_MASK) {
2307
            /* conforming code segment */
2308
            if (dpl > cpl)
2309
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2310
        } else {
2311
            /* non conforming code segment */
2312
            rpl = new_cs & 3;
2313
            if (rpl > cpl)
2314
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2315
            if (dpl != cpl)
2316
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2317
        }
2318
        if (!(e2 & DESC_P_MASK))
2319
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2320

    
2321
#ifdef TARGET_X86_64
2322
        /* XXX: check 16/32 bit cases in long mode */
2323
        if (shift == 2) {
2324
            target_ulong rsp;
2325
            /* 64 bit case */
2326
            rsp = ESP;
2327
            PUSHQ(rsp, env->segs[R_CS].selector);
2328
            PUSHQ(rsp, next_eip);
2329
            /* from this point, not restartable */
2330
            ESP = rsp;
2331
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2332
                                   get_seg_base(e1, e2),
2333
                                   get_seg_limit(e1, e2), e2);
2334
            EIP = new_eip;
2335
        } else
2336
#endif
2337
        {
2338
            sp = ESP;
2339
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2340
            ssp = env->segs[R_SS].base;
2341
            if (shift) {
2342
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2343
                PUSHL(ssp, sp, sp_mask, next_eip);
2344
            } else {
2345
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2346
                PUSHW(ssp, sp, sp_mask, next_eip);
2347
            }
2348

    
2349
            limit = get_seg_limit(e1, e2);
2350
            if (new_eip > limit)
2351
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2352
            /* from this point, not restartable */
2353
            SET_ESP(sp, sp_mask);
2354
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2355
                                   get_seg_base(e1, e2), limit, e2);
2356
            EIP = new_eip;
2357
        }
2358
    } else {
2359
        /* check gate type */
2360
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2361
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2362
        rpl = new_cs & 3;
2363
        switch(type) {
2364
        case 1: /* available 286 TSS */
2365
        case 9: /* available 386 TSS */
2366
        case 5: /* task gate */
2367
            if (dpl < cpl || dpl < rpl)
2368
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2369
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2370
            CC_OP = CC_OP_EFLAGS;
2371
            return;
2372
        case 4: /* 286 call gate */
2373
        case 12: /* 386 call gate */
2374
            break;
2375
        default:
2376
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2377
            break;
2378
        }
2379
        shift = type >> 3;
2380

    
2381
        if (dpl < cpl || dpl < rpl)
2382
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2383
        /* check valid bit */
2384
        if (!(e2 & DESC_P_MASK))
2385
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
2386
        selector = e1 >> 16;
2387
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2388
        param_count = e2 & 0x1f;
2389
        if ((selector & 0xfffc) == 0)
2390
            raise_exception_err(EXCP0D_GPF, 0);
2391

    
2392
        if (load_segment(&e1, &e2, selector) != 0)
2393
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2394
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2395
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2396
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2397
        if (dpl > cpl)
2398
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2399
        if (!(e2 & DESC_P_MASK))
2400
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2401

    
2402
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2403
            /* to inner privilege */
2404
            get_ss_esp_from_tss(&ss, &sp, dpl);
2405
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2406
                        ss, sp, param_count, ESP);
2407
            if ((ss & 0xfffc) == 0)
2408
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2409
            if ((ss & 3) != dpl)
2410
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2411
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2412
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2413
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2414
            if (ss_dpl != dpl)
2415
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2416
            if (!(ss_e2 & DESC_S_MASK) ||
2417
                (ss_e2 & DESC_CS_MASK) ||
2418
                !(ss_e2 & DESC_W_MASK))
2419
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2420
            if (!(ss_e2 & DESC_P_MASK))
2421
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2422

    
2423
            //            push_size = ((param_count * 2) + 8) << shift;
2424

    
2425
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2426
            old_ssp = env->segs[R_SS].base;
2427

    
2428
            sp_mask = get_sp_mask(ss_e2);
2429
            ssp = get_seg_base(ss_e1, ss_e2);
2430
            if (shift) {
2431
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2432
                PUSHL(ssp, sp, sp_mask, ESP);
2433
                for(i = param_count - 1; i >= 0; i--) {
2434
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2435
                    PUSHL(ssp, sp, sp_mask, val);
2436
                }
2437
            } else {
2438
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2439
                PUSHW(ssp, sp, sp_mask, ESP);
2440
                for(i = param_count - 1; i >= 0; i--) {
2441
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2442
                    PUSHW(ssp, sp, sp_mask, val);
2443
                }
2444
            }
2445
            new_stack = 1;
2446
        } else {
2447
            /* to same privilege */
2448
            sp = ESP;
2449
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2450
            ssp = env->segs[R_SS].base;
2451
            //            push_size = (4 << shift);
2452
            new_stack = 0;
2453
        }
2454

    
2455
        if (shift) {
2456
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2457
            PUSHL(ssp, sp, sp_mask, next_eip);
2458
        } else {
2459
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2460
            PUSHW(ssp, sp, sp_mask, next_eip);
2461
        }
2462

    
2463
        /* from this point, not restartable */
2464

    
2465
        if (new_stack) {
2466
            ss = (ss & ~3) | dpl;
2467
            cpu_x86_load_seg_cache(env, R_SS, ss,
2468
                                   ssp,
2469
                                   get_seg_limit(ss_e1, ss_e2),
2470
                                   ss_e2);
2471
        }
2472

    
2473
        selector = (selector & ~3) | dpl;
2474
        cpu_x86_load_seg_cache(env, R_CS, selector,
2475
                       get_seg_base(e1, e2),
2476
                       get_seg_limit(e1, e2),
2477
                       e2);
2478
        cpu_x86_set_cpl(env, dpl);
2479
        SET_ESP(sp, sp_mask);
2480
        EIP = offset;
2481
    }
2482
#ifdef USE_KQEMU
2483
    if (kqemu_is_ok(env)) {
2484
        env->exception_index = -1;
2485
        cpu_loop_exit();
2486
    }
2487
#endif
2488
}
2489

    
2490
/* real and vm86 mode iret */
2491
void helper_iret_real(int shift)
2492
{
2493
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2494
    target_ulong ssp;
2495
    int eflags_mask;
2496

    
2497
    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2498
    sp = ESP;
2499
    ssp = env->segs[R_SS].base;
2500
    if (shift == 1) {
2501
        /* 32 bits */
2502
        POPL(ssp, sp, sp_mask, new_eip);
2503
        POPL(ssp, sp, sp_mask, new_cs);
2504
        new_cs &= 0xffff;
2505
        POPL(ssp, sp, sp_mask, new_eflags);
2506
    } else {
2507
        /* 16 bits */
2508
        POPW(ssp, sp, sp_mask, new_eip);
2509
        POPW(ssp, sp, sp_mask, new_cs);
2510
        POPW(ssp, sp, sp_mask, new_eflags);
2511
    }
2512
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2513
    env->segs[R_CS].selector = new_cs;
2514
    env->segs[R_CS].base = (new_cs << 4);
2515
    env->eip = new_eip;
2516
    if (env->eflags & VM_MASK)
2517
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2518
    else
2519
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2520
    if (shift == 0)
2521
        eflags_mask &= 0xffff;
2522
    load_eflags(new_eflags, eflags_mask);
2523
    env->hflags2 &= ~HF2_NMI_MASK;
2524
}
2525

    
2526
static inline void validate_seg(int seg_reg, int cpl)
2527
{
2528
    int dpl;
2529
    uint32_t e2;
2530

    
2531
    /* XXX: on x86_64, we do not want to nullify FS and GS because
2532
       they may still contain a valid base. I would be interested to
2533
       know how a real x86_64 CPU behaves */
2534
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
2535
        (env->segs[seg_reg].selector & 0xfffc) == 0)
2536
        return;
2537

    
2538
    e2 = env->segs[seg_reg].flags;
2539
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2540
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2541
        /* data or non conforming code segment */
2542
        if (dpl < cpl) {
2543
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2544
        }
2545
    }
2546
}
2547

    
2548
/* protected mode iret */
2549
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2550
{
2551
    uint32_t new_cs, new_eflags, new_ss;
2552
    uint32_t new_es, new_ds, new_fs, new_gs;
2553
    uint32_t e1, e2, ss_e1, ss_e2;
2554
    int cpl, dpl, rpl, eflags_mask, iopl;
2555
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2556

    
2557
#ifdef TARGET_X86_64
2558
    if (shift == 2)
2559
        sp_mask = -1;
2560
    else
2561
#endif
2562
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2563
    sp = ESP;
2564
    ssp = env->segs[R_SS].base;
2565
    new_eflags = 0; /* avoid warning */
2566
#ifdef TARGET_X86_64
2567
    if (shift == 2) {
2568
        POPQ(sp, new_eip);
2569
        POPQ(sp, new_cs);
2570
        new_cs &= 0xffff;
2571
        if (is_iret) {
2572
            POPQ(sp, new_eflags);
2573
        }
2574
    } else
2575
#endif
2576
    if (shift == 1) {
2577
        /* 32 bits */
2578
        POPL(ssp, sp, sp_mask, new_eip);
2579
        POPL(ssp, sp, sp_mask, new_cs);
2580
        new_cs &= 0xffff;
2581
        if (is_iret) {
2582
            POPL(ssp, sp, sp_mask, new_eflags);
2583
            if (new_eflags & VM_MASK)
2584
                goto return_to_vm86;
2585
        }
2586
    } else {
2587
        /* 16 bits */
2588
        POPW(ssp, sp, sp_mask, new_eip);
2589
        POPW(ssp, sp, sp_mask, new_cs);
2590
        if (is_iret)
2591
            POPW(ssp, sp, sp_mask, new_eflags);
2592
    }
2593
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2594
              new_cs, new_eip, shift, addend);
2595
    LOG_PCALL_STATE(env);
2596
    if ((new_cs & 0xfffc) == 0)
2597
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2598
    if (load_segment(&e1, &e2, new_cs) != 0)
2599
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2600
    if (!(e2 & DESC_S_MASK) ||
2601
        !(e2 & DESC_CS_MASK))
2602
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2603
    cpl = env->hflags & HF_CPL_MASK;
2604
    rpl = new_cs & 3;
2605
    if (rpl < cpl)
2606
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2607
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2608
    if (e2 & DESC_C_MASK) {
2609
        if (dpl > rpl)
2610
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2611
    } else {
2612
        if (dpl != rpl)
2613
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2614
    }
2615
    if (!(e2 & DESC_P_MASK))
2616
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2617

    
2618
    sp += addend;
2619
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2620
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2621
        /* return to same privilege level */
2622
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2623
                       get_seg_base(e1, e2),
2624
                       get_seg_limit(e1, e2),
2625
                       e2);
2626
    } else {
2627
        /* return to different privilege level */
2628
#ifdef TARGET_X86_64
2629
        if (shift == 2) {
2630
            POPQ(sp, new_esp);
2631
            POPQ(sp, new_ss);
2632
            new_ss &= 0xffff;
2633
        } else
2634
#endif
2635
        if (shift == 1) {
2636
            /* 32 bits */
2637
            POPL(ssp, sp, sp_mask, new_esp);
2638
            POPL(ssp, sp, sp_mask, new_ss);
2639
            new_ss &= 0xffff;
2640
        } else {
2641
            /* 16 bits */
2642
            POPW(ssp, sp, sp_mask, new_esp);
2643
            POPW(ssp, sp, sp_mask, new_ss);
2644
        }
2645
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2646
                    new_ss, new_esp);
2647
        if ((new_ss & 0xfffc) == 0) {
2648
#ifdef TARGET_X86_64
2649
            /* NULL ss is allowed in long mode if cpl != 3*/
2650
            /* XXX: test CS64 ? */
2651
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2652
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2653
                                       0, 0xffffffff,
2654
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2655
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2656
                                       DESC_W_MASK | DESC_A_MASK);
2657
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2658
            } else
2659
#endif
2660
            {
2661
                raise_exception_err(EXCP0D_GPF, 0);
2662
            }
2663
        } else {
2664
            if ((new_ss & 3) != rpl)
2665
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2666
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2667
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2668
            if (!(ss_e2 & DESC_S_MASK) ||
2669
                (ss_e2 & DESC_CS_MASK) ||
2670
                !(ss_e2 & DESC_W_MASK))
2671
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2672
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2673
            if (dpl != rpl)
2674
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2675
            if (!(ss_e2 & DESC_P_MASK))
2676
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2677
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2678
                                   get_seg_base(ss_e1, ss_e2),
2679
                                   get_seg_limit(ss_e1, ss_e2),
2680
                                   ss_e2);
2681
        }
2682

    
2683
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2684
                       get_seg_base(e1, e2),
2685
                       get_seg_limit(e1, e2),
2686
                       e2);
2687
        cpu_x86_set_cpl(env, rpl);
2688
        sp = new_esp;
2689
#ifdef TARGET_X86_64
2690
        if (env->hflags & HF_CS64_MASK)
2691
            sp_mask = -1;
2692
        else
2693
#endif
2694
            sp_mask = get_sp_mask(ss_e2);
2695

    
2696
        /* validate data segments */
2697
        validate_seg(R_ES, rpl);
2698
        validate_seg(R_DS, rpl);
2699
        validate_seg(R_FS, rpl);
2700
        validate_seg(R_GS, rpl);
2701

    
2702
        sp += addend;
2703
    }
2704
    SET_ESP(sp, sp_mask);
2705
    env->eip = new_eip;
2706
    if (is_iret) {
2707
        /* NOTE: 'cpl' is the _old_ CPL */
2708
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2709
        if (cpl == 0)
2710
            eflags_mask |= IOPL_MASK;
2711
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2712
        if (cpl <= iopl)
2713
            eflags_mask |= IF_MASK;
2714
        if (shift == 0)
2715
            eflags_mask &= 0xffff;
2716
        load_eflags(new_eflags, eflags_mask);
2717
    }
2718
    return;
2719

    
2720
 return_to_vm86:
2721
    POPL(ssp, sp, sp_mask, new_esp);
2722
    POPL(ssp, sp, sp_mask, new_ss);
2723
    POPL(ssp, sp, sp_mask, new_es);
2724
    POPL(ssp, sp, sp_mask, new_ds);
2725
    POPL(ssp, sp, sp_mask, new_fs);
2726
    POPL(ssp, sp, sp_mask, new_gs);
2727

    
2728
    /* modify processor state */
2729
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2730
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2731
    load_seg_vm(R_CS, new_cs & 0xffff);
2732
    cpu_x86_set_cpl(env, 3);
2733
    load_seg_vm(R_SS, new_ss & 0xffff);
2734
    load_seg_vm(R_ES, new_es & 0xffff);
2735
    load_seg_vm(R_DS, new_ds & 0xffff);
2736
    load_seg_vm(R_FS, new_fs & 0xffff);
2737
    load_seg_vm(R_GS, new_gs & 0xffff);
2738

    
2739
    env->eip = new_eip & 0xffff;
2740
    ESP = new_esp;
2741
}
2742

    
2743
void helper_iret_protected(int shift, int next_eip)
2744
{
2745
    int tss_selector, type;
2746
    uint32_t e1, e2;
2747

    
2748
    /* specific case for TSS */
2749
    if (env->eflags & NT_MASK) {
2750
#ifdef TARGET_X86_64
2751
        if (env->hflags & HF_LMA_MASK)
2752
            raise_exception_err(EXCP0D_GPF, 0);
2753
#endif
2754
        tss_selector = lduw_kernel(env->tr.base + 0);
2755
        if (tss_selector & 4)
2756
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2757
        if (load_segment(&e1, &e2, tss_selector) != 0)
2758
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2759
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2760
        /* NOTE: we check both segment and busy TSS */
2761
        if (type != 3)
2762
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2763
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2764
    } else {
2765
        helper_ret_protected(shift, 1, 0);
2766
    }
2767
    env->hflags2 &= ~HF2_NMI_MASK;
2768
#ifdef USE_KQEMU
2769
    if (kqemu_is_ok(env)) {
2770
        CC_OP = CC_OP_EFLAGS;
2771
        env->exception_index = -1;
2772
        cpu_loop_exit();
2773
    }
2774
#endif
2775
}
2776

    
2777
void helper_lret_protected(int shift, int addend)
2778
{
2779
    helper_ret_protected(shift, 0, addend);
2780
#ifdef USE_KQEMU
2781
    if (kqemu_is_ok(env)) {
2782
        env->exception_index = -1;
2783
        cpu_loop_exit();
2784
    }
2785
#endif
2786
}
2787

    
2788
void helper_sysenter(void)
2789
{
2790
    if (env->sysenter_cs == 0) {
2791
        raise_exception_err(EXCP0D_GPF, 0);
2792
    }
2793
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2794
    cpu_x86_set_cpl(env, 0);
2795

    
2796
#ifdef TARGET_X86_64
2797
    if (env->hflags & HF_LMA_MASK) {
2798
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2799
                               0, 0xffffffff,
2800
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2801
                               DESC_S_MASK |
2802
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2803
    } else
2804
#endif
2805
    {
2806
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2807
                               0, 0xffffffff,
2808
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2809
                               DESC_S_MASK |
2810
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2811
    }
2812
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2813
                           0, 0xffffffff,
2814
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2815
                           DESC_S_MASK |
2816
                           DESC_W_MASK | DESC_A_MASK);
2817
    ESP = env->sysenter_esp;
2818
    EIP = env->sysenter_eip;
2819
}
2820

    
2821
void helper_sysexit(int dflag)
2822
{
2823
    int cpl;
2824

    
2825
    cpl = env->hflags & HF_CPL_MASK;
2826
    if (env->sysenter_cs == 0 || cpl != 0) {
2827
        raise_exception_err(EXCP0D_GPF, 0);
2828
    }
2829
    cpu_x86_set_cpl(env, 3);
2830
#ifdef TARGET_X86_64
2831
    if (dflag == 2) {
2832
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2833
                               0, 0xffffffff,
2834
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2835
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2836
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2837
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2838
                               0, 0xffffffff,
2839
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2840
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2841
                               DESC_W_MASK | DESC_A_MASK);
2842
    } else
2843
#endif
2844
    {
2845
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2846
                               0, 0xffffffff,
2847
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2848
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2849
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2850
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2851
                               0, 0xffffffff,
2852
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2853
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2854
                               DESC_W_MASK | DESC_A_MASK);
2855
    }
2856
    ESP = ECX;
2857
    EIP = EDX;
2858
#ifdef USE_KQEMU
2859
    if (kqemu_is_ok(env)) {
2860
        env->exception_index = -1;
2861
        cpu_loop_exit();
2862
    }
2863
#endif
2864
}
2865

    
2866
#if defined(CONFIG_USER_ONLY)
2867
target_ulong helper_read_crN(int reg)
2868
{
2869
    return 0;
2870
}
2871

    
2872
void helper_write_crN(int reg, target_ulong t0)
2873
{
2874
}
2875

    
2876
void helper_movl_drN_T0(int reg, target_ulong t0)
2877
{
2878
}
2879
#else
2880
target_ulong helper_read_crN(int reg)
2881
{
2882
    target_ulong val;
2883

    
2884
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2885
    switch(reg) {
2886
    default:
2887
        val = env->cr[reg];
2888
        break;
2889
    case 8:
2890
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2891
            val = cpu_get_apic_tpr(env);
2892
        } else {
2893
            val = env->v_tpr;
2894
        }
2895
        break;
2896
    }
2897
    return val;
2898
}
2899

    
2900
void helper_write_crN(int reg, target_ulong t0)
2901
{
2902
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2903
    switch(reg) {
2904
    case 0:
2905
        cpu_x86_update_cr0(env, t0);
2906
        break;
2907
    case 3:
2908
        cpu_x86_update_cr3(env, t0);
2909
        break;
2910
    case 4:
2911
        cpu_x86_update_cr4(env, t0);
2912
        break;
2913
    case 8:
2914
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2915
            cpu_set_apic_tpr(env, t0);
2916
        }
2917
        env->v_tpr = t0 & 0x0f;
2918
        break;
2919
    default:
2920
        env->cr[reg] = t0;
2921
        break;
2922
    }
2923
}
2924

    
2925
void helper_movl_drN_T0(int reg, target_ulong t0)
2926
{
2927
    int i;
2928

    
2929
    if (reg < 4) {
2930
        hw_breakpoint_remove(env, reg);
2931
        env->dr[reg] = t0;
2932
        hw_breakpoint_insert(env, reg);
2933
    } else if (reg == 7) {
2934
        for (i = 0; i < 4; i++)
2935
            hw_breakpoint_remove(env, i);
2936
        env->dr[7] = t0;
2937
        for (i = 0; i < 4; i++)
2938
            hw_breakpoint_insert(env, i);
2939
    } else
2940
        env->dr[reg] = t0;
2941
}
2942
#endif
2943

    
2944
void helper_lmsw(target_ulong t0)
2945
{
2946
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2947
       if already set to one. */
2948
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2949
    helper_write_crN(0, t0);
2950
}
2951

    
2952
void helper_clts(void)
2953
{
2954
    env->cr[0] &= ~CR0_TS_MASK;
2955
    env->hflags &= ~HF_TS_MASK;
2956
}
2957

    
2958
void helper_invlpg(target_ulong addr)
2959
{
2960
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2961
    tlb_flush_page(env, addr);
2962
}
2963

    
2964
void helper_rdtsc(void)
2965
{
2966
    uint64_t val;
2967

    
2968
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2969
        raise_exception(EXCP0D_GPF);
2970
    }
2971
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2972

    
2973
    val = cpu_get_tsc(env) + env->tsc_offset;
2974
    EAX = (uint32_t)(val);
2975
    EDX = (uint32_t)(val >> 32);
2976
}
2977

    
2978
void helper_rdpmc(void)
2979
{
2980
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2981
        raise_exception(EXCP0D_GPF);
2982
    }
2983
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2984
    
2985
    /* currently unimplemented */
2986
    raise_exception_err(EXCP06_ILLOP, 0);
2987
}
2988

    
2989
#if defined(CONFIG_USER_ONLY)
2990
void helper_wrmsr(void)
2991
{
2992
}
2993

    
2994
void helper_rdmsr(void)
2995
{
2996
}
2997
#else
2998
void helper_wrmsr(void)
2999
{
3000
    uint64_t val;
3001

    
3002
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3003

    
3004
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3005

    
3006
    switch((uint32_t)ECX) {
3007
    case MSR_IA32_SYSENTER_CS:
3008
        env->sysenter_cs = val & 0xffff;
3009
        break;
3010
    case MSR_IA32_SYSENTER_ESP:
3011
        env->sysenter_esp = val;
3012
        break;
3013
    case MSR_IA32_SYSENTER_EIP:
3014
        env->sysenter_eip = val;
3015
        break;
3016
    case MSR_IA32_APICBASE:
3017
        cpu_set_apic_base(env, val);
3018
        break;
3019
    case MSR_EFER:
3020
        {
3021
            uint64_t update_mask;
3022
            update_mask = 0;
3023
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3024
                update_mask |= MSR_EFER_SCE;
3025
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3026
                update_mask |= MSR_EFER_LME;
3027
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3028
                update_mask |= MSR_EFER_FFXSR;
3029
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3030
                update_mask |= MSR_EFER_NXE;
3031
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3032
                update_mask |= MSR_EFER_SVME;
3033
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3034
                update_mask |= MSR_EFER_FFXSR;
3035
            cpu_load_efer(env, (env->efer & ~update_mask) |
3036
                          (val & update_mask));
3037
        }
3038
        break;
3039
    case MSR_STAR:
3040
        env->star = val;
3041
        break;
3042
    case MSR_PAT:
3043
        env->pat = val;
3044
        break;
3045
    case MSR_VM_HSAVE_PA:
3046
        env->vm_hsave = val;
3047
        break;
3048
#ifdef TARGET_X86_64
3049
    case MSR_LSTAR:
3050
        env->lstar = val;
3051
        break;
3052
    case MSR_CSTAR:
3053
        env->cstar = val;
3054
        break;
3055
    case MSR_FMASK:
3056
        env->fmask = val;
3057
        break;
3058
    case MSR_FSBASE:
3059
        env->segs[R_FS].base = val;
3060
        break;
3061
    case MSR_GSBASE:
3062
        env->segs[R_GS].base = val;
3063
        break;
3064
    case MSR_KERNELGSBASE:
3065
        env->kernelgsbase = val;
3066
        break;
3067
#endif
3068
    case MSR_MTRRphysBase(0):
3069
    case MSR_MTRRphysBase(1):
3070
    case MSR_MTRRphysBase(2):
3071
    case MSR_MTRRphysBase(3):
3072
    case MSR_MTRRphysBase(4):
3073
    case MSR_MTRRphysBase(5):
3074
    case MSR_MTRRphysBase(6):
3075
    case MSR_MTRRphysBase(7):
3076
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3077
        break;
3078
    case MSR_MTRRphysMask(0):
3079
    case MSR_MTRRphysMask(1):
3080
    case MSR_MTRRphysMask(2):
3081
    case MSR_MTRRphysMask(3):
3082
    case MSR_MTRRphysMask(4):
3083
    case MSR_MTRRphysMask(5):
3084
    case MSR_MTRRphysMask(6):
3085
    case MSR_MTRRphysMask(7):
3086
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3087
        break;
3088
    case MSR_MTRRfix64K_00000:
3089
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3090
        break;
3091
    case MSR_MTRRfix16K_80000:
3092
    case MSR_MTRRfix16K_A0000:
3093
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3094
        break;
3095
    case MSR_MTRRfix4K_C0000:
3096
    case MSR_MTRRfix4K_C8000:
3097
    case MSR_MTRRfix4K_D0000:
3098
    case MSR_MTRRfix4K_D8000:
3099
    case MSR_MTRRfix4K_E0000:
3100
    case MSR_MTRRfix4K_E8000:
3101
    case MSR_MTRRfix4K_F0000:
3102
    case MSR_MTRRfix4K_F8000:
3103
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3104
        break;
3105
    case MSR_MTRRdefType:
3106
        env->mtrr_deftype = val;
3107
        break;
3108
    default:
3109
        /* XXX: exception ? */
3110
        break;
3111
    }
3112
}
3113

    
3114
void helper_rdmsr(void)
3115
{
3116
    uint64_t val;
3117

    
3118
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3119

    
3120
    switch((uint32_t)ECX) {
3121
    case MSR_IA32_SYSENTER_CS:
3122
        val = env->sysenter_cs;
3123
        break;
3124
    case MSR_IA32_SYSENTER_ESP:
3125
        val = env->sysenter_esp;
3126
        break;
3127
    case MSR_IA32_SYSENTER_EIP:
3128
        val = env->sysenter_eip;
3129
        break;
3130
    case MSR_IA32_APICBASE:
3131
        val = cpu_get_apic_base(env);
3132
        break;
3133
    case MSR_EFER:
3134
        val = env->efer;
3135
        break;
3136
    case MSR_STAR:
3137
        val = env->star;
3138
        break;
3139
    case MSR_PAT:
3140
        val = env->pat;
3141
        break;
3142
    case MSR_VM_HSAVE_PA:
3143
        val = env->vm_hsave;
3144
        break;
3145
    case MSR_IA32_PERF_STATUS:
3146
        /* tsc_increment_by_tick */
3147
        val = 1000ULL;
3148
        /* CPU multiplier */
3149
        val |= (((uint64_t)4ULL) << 40);
3150
        break;
3151
#ifdef TARGET_X86_64
3152
    case MSR_LSTAR:
3153
        val = env->lstar;
3154
        break;
3155
    case MSR_CSTAR:
3156
        val = env->cstar;
3157
        break;
3158
    case MSR_FMASK:
3159
        val = env->fmask;
3160
        break;
3161
    case MSR_FSBASE:
3162
        val = env->segs[R_FS].base;
3163
        break;
3164
    case MSR_GSBASE:
3165
        val = env->segs[R_GS].base;
3166
        break;
3167
    case MSR_KERNELGSBASE:
3168
        val = env->kernelgsbase;
3169
        break;
3170
#endif
3171
#ifdef USE_KQEMU
3172
    case MSR_QPI_COMMBASE:
3173
        if (env->kqemu_enabled) {
3174
            val = kqemu_comm_base;
3175
        } else {
3176
            val = 0;
3177
        }
3178
        break;
3179
#endif
3180
    case MSR_MTRRphysBase(0):
3181
    case MSR_MTRRphysBase(1):
3182
    case MSR_MTRRphysBase(2):
3183
    case MSR_MTRRphysBase(3):
3184
    case MSR_MTRRphysBase(4):
3185
    case MSR_MTRRphysBase(5):
3186
    case MSR_MTRRphysBase(6):
3187
    case MSR_MTRRphysBase(7):
3188
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3189
        break;
3190
    case MSR_MTRRphysMask(0):
3191
    case MSR_MTRRphysMask(1):
3192
    case MSR_MTRRphysMask(2):
3193
    case MSR_MTRRphysMask(3):
3194
    case MSR_MTRRphysMask(4):
3195
    case MSR_MTRRphysMask(5):
3196
    case MSR_MTRRphysMask(6):
3197
    case MSR_MTRRphysMask(7):
3198
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3199
        break;
3200
    case MSR_MTRRfix64K_00000:
3201
        val = env->mtrr_fixed[0];
3202
        break;
3203
    case MSR_MTRRfix16K_80000:
3204
    case MSR_MTRRfix16K_A0000:
3205
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3206
        break;
3207
    case MSR_MTRRfix4K_C0000:
3208
    case MSR_MTRRfix4K_C8000:
3209
    case MSR_MTRRfix4K_D0000:
3210
    case MSR_MTRRfix4K_D8000:
3211
    case MSR_MTRRfix4K_E0000:
3212
    case MSR_MTRRfix4K_E8000:
3213
    case MSR_MTRRfix4K_F0000:
3214
    case MSR_MTRRfix4K_F8000:
3215
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3216
        break;
3217
    case MSR_MTRRdefType:
3218
        val = env->mtrr_deftype;
3219
        break;
3220
    case MSR_MTRRcap:
3221
        if (env->cpuid_features & CPUID_MTRR)
3222
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3223
        else
3224
            /* XXX: exception ? */
3225
            val = 0;
3226
        break;
3227
    default:
3228
        /* XXX: exception ? */
3229
        val = 0;
3230
        break;
3231
    }
3232
    EAX = (uint32_t)(val);
3233
    EDX = (uint32_t)(val >> 32);
3234
}
3235
#endif
3236

    
3237
target_ulong helper_lsl(target_ulong selector1)
3238
{
3239
    unsigned int limit;
3240
    uint32_t e1, e2, eflags, selector;
3241
    int rpl, dpl, cpl, type;
3242

    
3243
    selector = selector1 & 0xffff;
3244
    eflags = helper_cc_compute_all(CC_OP);
3245
    if (load_segment(&e1, &e2, selector) != 0)
3246
        goto fail;
3247
    rpl = selector & 3;
3248
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3249
    cpl = env->hflags & HF_CPL_MASK;
3250
    if (e2 & DESC_S_MASK) {
3251
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3252
            /* conforming */
3253
        } else {
3254
            if (dpl < cpl || dpl < rpl)
3255
                goto fail;
3256
        }
3257
    } else {
3258
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3259
        switch(type) {
3260
        case 1:
3261
        case 2:
3262
        case 3:
3263
        case 9:
3264
        case 11:
3265
            break;
3266
        default:
3267
            goto fail;
3268
        }
3269
        if (dpl < cpl || dpl < rpl) {
3270
        fail:
3271
            CC_SRC = eflags & ~CC_Z;
3272
            return 0;
3273
        }
3274
    }
3275
    limit = get_seg_limit(e1, e2);
3276
    CC_SRC = eflags | CC_Z;
3277
    return limit;
3278
}
3279

    
3280
target_ulong helper_lar(target_ulong selector1)
3281
{
3282
    uint32_t e1, e2, eflags, selector;
3283
    int rpl, dpl, cpl, type;
3284

    
3285
    selector = selector1 & 0xffff;
3286
    eflags = helper_cc_compute_all(CC_OP);
3287
    if ((selector & 0xfffc) == 0)
3288
        goto fail;
3289
    if (load_segment(&e1, &e2, selector) != 0)
3290
        goto fail;
3291
    rpl = selector & 3;
3292
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3293
    cpl = env->hflags & HF_CPL_MASK;
3294
    if (e2 & DESC_S_MASK) {
3295
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3296
            /* conforming */
3297
        } else {
3298
            if (dpl < cpl || dpl < rpl)
3299
                goto fail;
3300
        }
3301
    } else {
3302
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3303
        switch(type) {
3304
        case 1:
3305
        case 2:
3306
        case 3:
3307
        case 4:
3308
        case 5:
3309
        case 9:
3310
        case 11:
3311
        case 12:
3312
            break;
3313
        default:
3314
            goto fail;
3315
        }
3316
        if (dpl < cpl || dpl < rpl) {
3317
        fail:
3318
            CC_SRC = eflags & ~CC_Z;
3319
            return 0;
3320
        }
3321
    }
3322
    CC_SRC = eflags | CC_Z;
3323
    return e2 & 0x00f0ff00;
3324
}
3325

    
3326
void helper_verr(target_ulong selector1)
3327
{
3328
    uint32_t e1, e2, eflags, selector;
3329
    int rpl, dpl, cpl;
3330

    
3331
    selector = selector1 & 0xffff;
3332
    eflags = helper_cc_compute_all(CC_OP);
3333
    if ((selector & 0xfffc) == 0)
3334
        goto fail;
3335
    if (load_segment(&e1, &e2, selector) != 0)
3336
        goto fail;
3337
    if (!(e2 & DESC_S_MASK))
3338
        goto fail;
3339
    rpl = selector & 3;
3340
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3341
    cpl = env->hflags & HF_CPL_MASK;
3342
    if (e2 & DESC_CS_MASK) {
3343
        if (!(e2 & DESC_R_MASK))
3344
            goto fail;
3345
        if (!(e2 & DESC_C_MASK)) {
3346
            if (dpl < cpl || dpl < rpl)
3347
                goto fail;
3348
        }
3349
    } else {
3350
        if (dpl < cpl || dpl < rpl) {
3351
        fail:
3352
            CC_SRC = eflags & ~CC_Z;
3353
            return;
3354
        }
3355
    }
3356
    CC_SRC = eflags | CC_Z;
3357
}
3358

    
3359
void helper_verw(target_ulong selector1)
3360
{
3361
    uint32_t e1, e2, eflags, selector;
3362
    int rpl, dpl, cpl;
3363

    
3364
    selector = selector1 & 0xffff;
3365
    eflags = helper_cc_compute_all(CC_OP);
3366
    if ((selector & 0xfffc) == 0)
3367
        goto fail;
3368
    if (load_segment(&e1, &e2, selector) != 0)
3369
        goto fail;
3370
    if (!(e2 & DESC_S_MASK))
3371
        goto fail;
3372
    rpl = selector & 3;
3373
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3374
    cpl = env->hflags & HF_CPL_MASK;
3375
    if (e2 & DESC_CS_MASK) {
3376
        goto fail;
3377
    } else {
3378
        if (dpl < cpl || dpl < rpl)
3379
            goto fail;
3380
        if (!(e2 & DESC_W_MASK)) {
3381
        fail:
3382
            CC_SRC = eflags & ~CC_Z;
3383
            return;
3384
        }
3385
    }
3386
    CC_SRC = eflags | CC_Z;
3387
}
3388

    
3389
/* x87 FPU helpers */
3390

    
3391
static void fpu_set_exception(int mask)
3392
{
3393
    env->fpus |= mask;
3394
    if (env->fpus & (~env->fpuc & FPUC_EM))
3395
        env->fpus |= FPUS_SE | FPUS_B;
3396
}
3397

    
3398
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3399
{
3400
    if (b == 0.0)
3401
        fpu_set_exception(FPUS_ZE);
3402
    return a / b;
3403
}
3404

    
3405
static void fpu_raise_exception(void)
3406
{
3407
    if (env->cr[0] & CR0_NE_MASK) {
3408
        raise_exception(EXCP10_COPR);
3409
    }
3410
#if !defined(CONFIG_USER_ONLY)
3411
    else {
3412
        cpu_set_ferr(env);
3413
    }
3414
#endif
3415
}
3416

    
3417
void helper_flds_FT0(uint32_t val)
3418
{
3419
    union {
3420
        float32 f;
3421
        uint32_t i;
3422
    } u;
3423
    u.i = val;
3424
    FT0 = float32_to_floatx(u.f, &env->fp_status);
3425
}
3426

    
3427
void helper_fldl_FT0(uint64_t val)
3428
{
3429
    union {
3430
        float64 f;
3431
        uint64_t i;
3432
    } u;
3433
    u.i = val;
3434
    FT0 = float64_to_floatx(u.f, &env->fp_status);
3435
}
3436

    
3437
void helper_fildl_FT0(int32_t val)
3438
{
3439
    FT0 = int32_to_floatx(val, &env->fp_status);
3440
}
3441

    
3442
void helper_flds_ST0(uint32_t val)
3443
{
3444
    int new_fpstt;
3445
    union {
3446
        float32 f;
3447
        uint32_t i;
3448
    } u;
3449
    new_fpstt = (env->fpstt - 1) & 7;
3450
    u.i = val;
3451
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3452
    env->fpstt = new_fpstt;
3453
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3454
}
3455

    
3456
void helper_fldl_ST0(uint64_t val)
3457
{
3458
    int new_fpstt;
3459
    union {
3460
        float64 f;
3461
        uint64_t i;
3462
    } u;
3463
    new_fpstt = (env->fpstt - 1) & 7;
3464
    u.i = val;
3465
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3466
    env->fpstt = new_fpstt;
3467
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3468
}
3469

    
3470
void helper_fildl_ST0(int32_t val)
3471
{
3472
    int new_fpstt;
3473
    new_fpstt = (env->fpstt - 1) & 7;
3474
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3475
    env->fpstt = new_fpstt;
3476
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3477
}
3478

    
3479
void helper_fildll_ST0(int64_t val)
3480
{
3481
    int new_fpstt;
3482
    new_fpstt = (env->fpstt - 1) & 7;
3483
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3484
    env->fpstt = new_fpstt;
3485
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3486
}
3487

    
3488
uint32_t helper_fsts_ST0(void)
3489
{
3490
    union {
3491
        float32 f;
3492
        uint32_t i;
3493
    } u;
3494
    u.f = floatx_to_float32(ST0, &env->fp_status);
3495
    return u.i;
3496
}
3497

    
3498
uint64_t helper_fstl_ST0(void)
3499
{
3500
    union {
3501
        float64 f;
3502
        uint64_t i;
3503
    } u;
3504
    u.f = floatx_to_float64(ST0, &env->fp_status);
3505
    return u.i;
3506
}
3507

    
3508
int32_t helper_fist_ST0(void)
3509
{
3510
    int32_t val;
3511
    val = floatx_to_int32(ST0, &env->fp_status);
3512
    if (val != (int16_t)val)
3513
        val = -32768;
3514
    return val;
3515
}
3516

    
3517
int32_t helper_fistl_ST0(void)
3518
{
3519
    int32_t val;
3520
    val = floatx_to_int32(ST0, &env->fp_status);
3521
    return val;
3522
}
3523

    
3524
int64_t helper_fistll_ST0(void)
3525
{
3526
    int64_t val;
3527
    val = floatx_to_int64(ST0, &env->fp_status);
3528
    return val;
3529
}
3530

    
3531
int32_t helper_fistt_ST0(void)
3532
{
3533
    int32_t val;
3534
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3535
    if (val != (int16_t)val)
3536
        val = -32768;
3537
    return val;
3538
}
3539

    
3540
int32_t helper_fisttl_ST0(void)
3541
{
3542
    int32_t val;
3543
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3544
    return val;
3545
}
3546

    
3547
int64_t helper_fisttll_ST0(void)
3548
{
3549
    int64_t val;
3550
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3551
    return val;
3552
}
3553

    
3554
void helper_fldt_ST0(target_ulong ptr)
3555
{
3556
    int new_fpstt;
3557
    new_fpstt = (env->fpstt - 1) & 7;
3558
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
3559
    env->fpstt = new_fpstt;
3560
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3561
}
3562

    
3563
void helper_fstt_ST0(target_ulong ptr)
3564
{
3565
    helper_fstt(ST0, ptr);
3566
}
3567

    
3568
void helper_fpush(void)
3569
{
3570
    fpush();
3571
}
3572

    
3573
void helper_fpop(void)
3574
{
3575
    fpop();
3576
}
3577

    
3578
void helper_fdecstp(void)
3579
{
3580
    env->fpstt = (env->fpstt - 1) & 7;
3581
    env->fpus &= (~0x4700);
3582
}
3583

    
3584
void helper_fincstp(void)
3585
{
3586
    env->fpstt = (env->fpstt + 1) & 7;
3587
    env->fpus &= (~0x4700);
3588
}
3589

    
3590
/* FPU move */
3591

    
3592
void helper_ffree_STN(int st_index)
3593
{
3594
    env->fptags[(env->fpstt + st_index) & 7] = 1;
3595
}
3596

    
3597
void helper_fmov_ST0_FT0(void)
3598
{
3599
    ST0 = FT0;
3600
}
3601

    
3602
void helper_fmov_FT0_STN(int st_index)
3603
{
3604
    FT0 = ST(st_index);
3605
}
3606

    
3607
void helper_fmov_ST0_STN(int st_index)
3608
{
3609
    ST0 = ST(st_index);
3610
}
3611

    
3612
void helper_fmov_STN_ST0(int st_index)
3613
{
3614
    ST(st_index) = ST0;
3615
}
3616

    
3617
void helper_fxchg_ST0_STN(int st_index)
3618
{
3619
    CPU86_LDouble tmp;
3620
    tmp = ST(st_index);
3621
    ST(st_index) = ST0;
3622
    ST0 = tmp;
3623
}
3624

    
3625
/* FPU operations */
3626

    
3627
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3628

    
3629
void helper_fcom_ST0_FT0(void)
3630
{
3631
    int ret;
3632

    
3633
    ret = floatx_compare(ST0, FT0, &env->fp_status);
3634
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3635
}
3636

    
3637
void helper_fucom_ST0_FT0(void)
3638
{
3639
    int ret;
3640

    
3641
    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3642
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3643
}
3644

    
3645
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3646

    
3647
void helper_fcomi_ST0_FT0(void)
3648
{
3649
    int eflags;
3650
    int ret;
3651

    
3652
    ret = floatx_compare(ST0, FT0, &env->fp_status);
3653
    eflags = helper_cc_compute_all(CC_OP);
3654
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3655
    CC_SRC = eflags;
3656
}
3657

    
3658
void helper_fucomi_ST0_FT0(void)
3659
{
3660
    int eflags;
3661
    int ret;
3662

    
3663
    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3664
    eflags = helper_cc_compute_all(CC_OP);
3665
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3666
    CC_SRC = eflags;
3667
}
3668

    
3669
void helper_fadd_ST0_FT0(void)
3670
{
3671
    ST0 += FT0;
3672
}
3673

    
3674
void helper_fmul_ST0_FT0(void)
3675
{
3676
    ST0 *= FT0;
3677
}
3678

    
3679
void helper_fsub_ST0_FT0(void)
3680
{
3681
    ST0 -= FT0;
3682
}
3683

    
3684
void helper_fsubr_ST0_FT0(void)
3685
{
3686
    ST0 = FT0 - ST0;
3687
}
3688

    
3689
void helper_fdiv_ST0_FT0(void)
3690
{
3691
    ST0 = helper_fdiv(ST0, FT0);
3692
}
3693

    
3694
void helper_fdivr_ST0_FT0(void)
3695
{
3696
    ST0 = helper_fdiv(FT0, ST0);
3697
}
3698

    
3699
/* fp operations between STN and ST0 */
3700

    
3701
void helper_fadd_STN_ST0(int st_index)
3702
{
3703
    ST(st_index) += ST0;
3704
}
3705

    
3706
void helper_fmul_STN_ST0(int st_index)
3707
{
3708
    ST(st_index) *= ST0;
3709
}
3710

    
3711
void helper_fsub_STN_ST0(int st_index)
3712
{
3713
    ST(st_index) -= ST0;
3714
}
3715

    
3716
void helper_fsubr_STN_ST0(int st_index)
3717
{
3718
    CPU86_LDouble *p;
3719
    p = &ST(st_index);
3720
    *p = ST0 - *p;
3721
}
3722

    
3723
void helper_fdiv_STN_ST0(int st_index)
3724
{
3725
    CPU86_LDouble *p;
3726
    p = &ST(st_index);
3727
    *p = helper_fdiv(*p, ST0);
3728
}
3729

    
3730
void helper_fdivr_STN_ST0(int st_index)
3731
{
3732
    CPU86_LDouble *p;
3733
    p = &ST(st_index);
3734
    *p = helper_fdiv(ST0, *p);
3735
}
3736

    
3737
/* misc FPU operations */
3738
void helper_fchs_ST0(void)
3739
{
3740
    ST0 = floatx_chs(ST0);
3741
}
3742

    
3743
void helper_fabs_ST0(void)
3744
{
3745
    ST0 = floatx_abs(ST0);
3746
}
3747

    
3748
void helper_fld1_ST0(void)
3749
{
3750
    ST0 = f15rk[1];
3751
}
3752

    
3753
void helper_fldl2t_ST0(void)
3754
{
3755
    ST0 = f15rk[6];
3756
}
3757

    
3758
void helper_fldl2e_ST0(void)
3759
{
3760
    ST0 = f15rk[5];
3761
}
3762

    
3763
void helper_fldpi_ST0(void)
3764
{
3765
    ST0 = f15rk[2];
3766
}
3767

    
3768
void helper_fldlg2_ST0(void)
3769
{
3770
    ST0 = f15rk[3];
3771
}
3772

    
3773
void helper_fldln2_ST0(void)
3774
{
3775
    ST0 = f15rk[4];
3776
}
3777

    
3778
void helper_fldz_ST0(void)
3779
{
3780
    ST0 = f15rk[0];
3781
}
3782

    
3783
void helper_fldz_FT0(void)
3784
{
3785
    FT0 = f15rk[0];
3786
}
3787

    
3788
uint32_t helper_fnstsw(void)
3789
{
3790
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3791
}
3792

    
3793
uint32_t helper_fnstcw(void)
3794
{
3795
    return env->fpuc;
3796
}
3797

    
3798
static void update_fp_status(void)
3799
{
3800
    int rnd_type;
3801

    
3802
    /* set rounding mode */
3803
    switch(env->fpuc & RC_MASK) {
3804
    default:
3805
    case RC_NEAR:
3806
        rnd_type = float_round_nearest_even;
3807
        break;
3808
    case RC_DOWN:
3809
        rnd_type = float_round_down;
3810
        break;
3811
    case RC_UP:
3812
        rnd_type = float_round_up;
3813
        break;
3814
    case RC_CHOP:
3815
        rnd_type = float_round_to_zero;
3816
        break;
3817
    }
3818
    set_float_rounding_mode(rnd_type, &env->fp_status);
3819
#ifdef FLOATX80
3820
    switch((env->fpuc >> 8) & 3) {
3821
    case 0:
3822
        rnd_type = 32;
3823
        break;
3824
    case 2:
3825
        rnd_type = 64;
3826
        break;
3827
    case 3:
3828
    default:
3829
        rnd_type = 80;
3830
        break;
3831
    }
3832
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3833
#endif
3834
}
3835

    
3836
void helper_fldcw(uint32_t val)
3837
{
3838
    env->fpuc = val;
3839
    update_fp_status();
3840
}
3841

    
3842
void helper_fclex(void)
3843
{
3844
    env->fpus &= 0x7f00;
3845
}
3846

    
3847
void helper_fwait(void)
3848
{
3849
    if (env->fpus & FPUS_SE)
3850
        fpu_raise_exception();
3851
}
3852

    
3853
void helper_fninit(void)
3854
{
3855
    env->fpus = 0;
3856
    env->fpstt = 0;
3857
    env->fpuc = 0x37f;
3858
    env->fptags[0] = 1;
3859
    env->fptags[1] = 1;
3860
    env->fptags[2] = 1;
3861
    env->fptags[3] = 1;
3862
    env->fptags[4] = 1;
3863
    env->fptags[5] = 1;
3864
    env->fptags[6] = 1;
3865
    env->fptags[7] = 1;
3866
}
3867

    
3868
/* BCD ops */
3869

    
3870
void helper_fbld_ST0(target_ulong ptr)
3871
{
3872
    CPU86_LDouble tmp;
3873
    uint64_t val;
3874
    unsigned int v;
3875
    int i;
3876

    
3877
    val = 0;
3878
    for(i = 8; i >= 0; i--) {
3879
        v = ldub(ptr + i);
3880
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3881
    }
3882
    tmp = val;
3883
    if (ldub(ptr + 9) & 0x80)
3884
        tmp = -tmp;
3885
    fpush();
3886
    ST0 = tmp;
3887
}
3888

    
3889
void helper_fbst_ST0(target_ulong ptr)
3890
{
3891
    int v;
3892
    target_ulong mem_ref, mem_end;
3893
    int64_t val;
3894

    
3895
    val = floatx_to_int64(ST0, &env->fp_status);
3896
    mem_ref = ptr;
3897
    mem_end = mem_ref + 9;
3898
    if (val < 0) {
3899
        stb(mem_end, 0x80);
3900
        val = -val;
3901
    } else {
3902
        stb(mem_end, 0x00);
3903
    }
3904
    while (mem_ref < mem_end) {
3905
        if (val == 0)
3906
            break;
3907
        v = val % 100;
3908
        val = val / 100;
3909
        v = ((v / 10) << 4) | (v % 10);
3910
        stb(mem_ref++, v);
3911
    }
3912
    while (mem_ref < mem_end) {
3913
        stb(mem_ref++, 0);
3914
    }
3915
}
3916

    
3917
void helper_f2xm1(void)
3918
{
3919
    ST0 = pow(2.0,ST0) - 1.0;
3920
}
3921

    
3922
void helper_fyl2x(void)
3923
{
3924
    CPU86_LDouble fptemp;
3925

    
3926
    fptemp = ST0;
3927
    if (fptemp>0.0){
3928
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
3929
        ST1 *= fptemp;
3930
        fpop();
3931
    } else {
3932
        env->fpus &= (~0x4700);
3933
        env->fpus |= 0x400;
3934
    }
3935
}
3936

    
3937
void helper_fptan(void)
3938
{
3939
    CPU86_LDouble fptemp;
3940

    
3941
    fptemp = ST0;
3942
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3943
        env->fpus |= 0x400;
3944
    } else {
3945
        ST0 = tan(fptemp);
3946
        fpush();
3947
        ST0 = 1.0;
3948
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3949
        /* the above code is for  |arg| < 2**52 only */
3950
    }
3951
}
3952

    
3953
void helper_fpatan(void)
3954
{
3955
    CPU86_LDouble fptemp, fpsrcop;
3956

    
3957
    fpsrcop = ST1;
3958
    fptemp = ST0;
3959
    ST1 = atan2(fpsrcop,fptemp);
3960
    fpop();
3961
}
3962

    
3963
void helper_fxtract(void)
3964
{
3965
    CPU86_LDoubleU temp;
3966
    unsigned int expdif;
3967

    
3968
    temp.d = ST0;
3969
    expdif = EXPD(temp) - EXPBIAS;
3970
    /*DP exponent bias*/
3971
    ST0 = expdif;
3972
    fpush();
3973
    BIASEXPONENT(temp);
3974
    ST0 = temp.d;
3975
}
3976

    
3977
void helper_fprem1(void)
3978
{
3979
    CPU86_LDouble dblq, fpsrcop, fptemp;
3980
    CPU86_LDoubleU fpsrcop1, fptemp1;
3981
    int expdif;
3982
    signed long long int q;
3983

    
3984
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3985
        ST0 = 0.0 / 0.0; /* NaN */
3986
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3987
        return;
3988
    }
3989

    
3990
    fpsrcop = ST0;
3991
    fptemp = ST1;
3992
    fpsrcop1.d = fpsrcop;
3993
    fptemp1.d = fptemp;
3994
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3995

    
3996
    if (expdif < 0) {
3997
        /* optimisation? taken from the AMD docs */
3998
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3999
        /* ST0 is unchanged */
4000
        return;
4001
    }
4002

    
4003
    if (expdif < 53) {
4004
        dblq = fpsrcop / fptemp;
4005
        /* round dblq towards nearest integer */
4006
        dblq = rint(dblq);
4007
        ST0 = fpsrcop - fptemp * dblq;
4008

    
4009
        /* convert dblq to q by truncating towards zero */
4010
        if (dblq < 0.0)
4011
           q = (signed long long int)(-dblq);
4012
        else
4013
           q = (signed long long int)dblq;
4014

    
4015
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4016
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
4017
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
4018
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4019
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
4020
    } else {
4021
        env->fpus |= 0x400;  /* C2 <-- 1 */
4022
        fptemp = pow(2.0, expdif - 50);
4023
        fpsrcop = (ST0 / ST1) / fptemp;
4024
        /* fpsrcop = integer obtained by chopping */
4025
        fpsrcop = (fpsrcop < 0.0) ?
4026
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4027
        ST0 -= (ST1 * fpsrcop * fptemp);
4028
    }
4029
}
4030

    
4031
void helper_fprem(void)
4032
{
4033
    CPU86_LDouble dblq, fpsrcop, fptemp;
4034
    CPU86_LDoubleU fpsrcop1, fptemp1;
4035
    int expdif;
4036
    signed long long int q;
4037

    
4038
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4039
       ST0 = 0.0 / 0.0; /* NaN */
4040
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4041
       return;
4042
    }
4043

    
4044
    fpsrcop = (CPU86_LDouble)ST0;
4045
    fptemp = (CPU86_LDouble)ST1;
4046
    fpsrcop1.d = fpsrcop;
4047
    fptemp1.d = fptemp;
4048
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4049

    
4050
    if (expdif < 0) {
4051
        /* optimisation? taken from the AMD docs */
4052
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4053
        /* ST0 is unchanged */
4054
        return;
4055
    }
4056

    
4057
    if ( expdif < 53 ) {
4058
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4059
        /* round dblq towards zero */
4060
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4061
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4062

    
4063
        /* convert dblq to q by truncating towards zero */
4064
        if (dblq < 0.0)
4065
           q = (signed long long int)(-dblq);
4066
        else
4067
           q = (signed long long int)dblq;
4068

    
4069
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4070
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
4071
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
4072
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4073
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
4074
    } else {
4075
        int N = 32 + (expdif % 32); /* as per AMD docs */
4076
        env->fpus |= 0x400;  /* C2 <-- 1 */
4077
        fptemp = pow(2.0, (double)(expdif - N));
4078
        fpsrcop = (ST0 / ST1) / fptemp;
4079
        /* fpsrcop = integer obtained by chopping */
4080
        fpsrcop = (fpsrcop < 0.0) ?
4081
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4082
        ST0 -= (ST1 * fpsrcop * fptemp);
4083
    }
4084
}
4085

    
4086
void helper_fyl2xp1(void)
4087
{
4088
    CPU86_LDouble fptemp;
4089

    
4090
    fptemp = ST0;
4091
    if ((fptemp+1.0)>0.0) {
4092
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4093
        ST1 *= fptemp;
4094
        fpop();
4095
    } else {
4096
        env->fpus &= (~0x4700);
4097
        env->fpus |= 0x400;
4098
    }
4099
}
4100

    
4101
void helper_fsqrt(void)
4102
{
4103
    CPU86_LDouble fptemp;
4104

    
4105
    fptemp = ST0;
4106
    if (fptemp<0.0) {
4107
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
4108
        env->fpus |= 0x400;
4109
    }
4110
    ST0 = sqrt(fptemp);
4111
}
4112

    
4113
void helper_fsincos(void)
4114
{
4115
    CPU86_LDouble fptemp;
4116

    
4117
    fptemp = ST0;
4118
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4119
        env->fpus |= 0x400;
4120
    } else {
4121
        ST0 = sin(fptemp);
4122
        fpush();
4123
        ST0 = cos(fptemp);
4124
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4125
        /* the above code is for  |arg| < 2**63 only */
4126
    }
4127
}
4128

    
4129
void helper_frndint(void)
4130
{
4131
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
4132
}
4133

    
4134
void helper_fscale(void)
4135
{
4136
    ST0 = ldexp (ST0, (int)(ST1));
4137
}
4138

    
4139
void helper_fsin(void)
4140
{
4141
    CPU86_LDouble fptemp;
4142

    
4143
    fptemp = ST0;
4144
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4145
        env->fpus |= 0x400;
4146
    } else {
4147
        ST0 = sin(fptemp);
4148
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4149
        /* the above code is for  |arg| < 2**53 only */
4150
    }
4151
}
4152

    
4153
void helper_fcos(void)
4154
{
4155
    CPU86_LDouble fptemp;
4156

    
4157
    fptemp = ST0;
4158
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4159
        env->fpus |= 0x400;
4160
    } else {
4161
        ST0 = cos(fptemp);
4162
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4163
        /* the above code is for  |arg5 < 2**63 only */
4164
    }
4165
}
4166

    
4167
void helper_fxam_ST0(void)
4168
{
4169
    CPU86_LDoubleU temp;
4170
    int expdif;
4171

    
4172
    temp.d = ST0;
4173

    
4174
    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
4175
    if (SIGND(temp))
4176
        env->fpus |= 0x200; /* C1 <-- 1 */
4177

    
4178
    /* XXX: test fptags too */
4179
    expdif = EXPD(temp);
4180
    if (expdif == MAXEXPD) {
4181
#ifdef USE_X86LDOUBLE
4182
        if (MANTD(temp) == 0x8000000000000000ULL)
4183
#else
4184
        if (MANTD(temp) == 0)
4185
#endif
4186
            env->fpus |=  0x500 /*Infinity*/;
4187
        else
4188
            env->fpus |=  0x100 /*NaN*/;
4189
    } else if (expdif == 0) {
4190
        if (MANTD(temp) == 0)
4191
            env->fpus |=  0x4000 /*Zero*/;
4192
        else
4193
            env->fpus |= 0x4400 /*Denormal*/;
4194
    } else {
4195
        env->fpus |= 0x400;
4196
    }
4197
}
4198

    
4199
void helper_fstenv(target_ulong ptr, int data32)
4200
{
4201
    int fpus, fptag, exp, i;
4202
    uint64_t mant;
4203
    CPU86_LDoubleU tmp;
4204

    
4205
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4206
    fptag = 0;
4207
    for (i=7; i>=0; i--) {
4208
        fptag <<= 2;
4209
        if (env->fptags[i]) {
4210
            fptag |= 3;
4211
        } else {
4212
            tmp.d = env->fpregs[i].d;
4213
            exp = EXPD(tmp);
4214
            mant = MANTD(tmp);
4215
            if (exp == 0 && mant == 0) {
4216
                /* zero */
4217
                fptag |= 1;
4218
            } else if (exp == 0 || exp == MAXEXPD
4219
#ifdef USE_X86LDOUBLE
4220
                       || (mant & (1LL << 63)) == 0
4221
#endif
4222
                       ) {
4223
                /* NaNs, infinity, denormal */
4224
                fptag |= 2;
4225
            }
4226
        }
4227
    }
4228
    if (data32) {
4229
        /* 32 bit */
4230
        stl(ptr, env->fpuc);
4231
        stl(ptr + 4, fpus);
4232
        stl(ptr + 8, fptag);
4233
        stl(ptr + 12, 0); /* fpip */
4234
        stl(ptr + 16, 0); /* fpcs */
4235
        stl(ptr + 20, 0); /* fpoo */
4236
        stl(ptr + 24, 0); /* fpos */
4237
    } else {
4238
        /* 16 bit */
4239
        stw(ptr, env->fpuc);
4240
        stw(ptr + 2, fpus);
4241
        stw(ptr + 4, fptag);
4242
        stw(ptr + 6, 0);
4243
        stw(ptr + 8, 0);
4244
        stw(ptr + 10, 0);
4245
        stw(ptr + 12, 0);
4246
    }
4247
}
4248

    
4249
void helper_fldenv(target_ulong ptr, int data32)
4250
{
4251
    int i, fpus, fptag;
4252

    
4253
    if (data32) {
4254
        env->fpuc = lduw(ptr);
4255
        fpus = lduw(ptr + 4);
4256
        fptag = lduw(ptr + 8);
4257
    }
4258
    else {
4259
        env->fpuc = lduw(ptr);
4260
        fpus = lduw(ptr + 2);
4261
        fptag = lduw(ptr + 4);
4262
    }
4263
    env->fpstt = (fpus >> 11) & 7;
4264
    env->fpus = fpus & ~0x3800;
4265
    for(i = 0;i < 8; i++) {
4266
        env->fptags[i] = ((fptag & 3) == 3);
4267
        fptag >>= 2;
4268
    }
4269
}
4270

    
4271
void helper_fsave(target_ulong ptr, int data32)
4272
{
4273
    CPU86_LDouble tmp;
4274
    int i;
4275

    
4276
    helper_fstenv(ptr, data32);
4277

    
4278
    ptr += (14 << data32);
4279
    for(i = 0;i < 8; i++) {
4280
        tmp = ST(i);
4281
        helper_fstt(tmp, ptr);
4282
        ptr += 10;
4283
    }
4284

    
4285
    /* fninit */
4286
    env->fpus = 0;
4287
    env->fpstt = 0;
4288
    env->fpuc = 0x37f;
4289
    env->fptags[0] = 1;
4290
    env->fptags[1] = 1;
4291
    env->fptags[2] = 1;
4292
    env->fptags[3] = 1;
4293
    env->fptags[4] = 1;
4294
    env->fptags[5] = 1;
4295
    env->fptags[6] = 1;
4296
    env->fptags[7] = 1;
4297
}
4298

    
4299
void helper_frstor(target_ulong ptr, int data32)
4300
{
4301
    CPU86_LDouble tmp;
4302
    int i;
4303

    
4304
    helper_fldenv(ptr, data32);
4305
    ptr += (14 << data32);
4306

    
4307
    for(i = 0;i < 8; i++) {
4308
        tmp = helper_fldt(ptr);
4309
        ST(i) = tmp;
4310
        ptr += 10;
4311
    }
4312
}
4313

    
4314
void helper_fxsave(target_ulong ptr, int data64)
4315
{
4316
    int fpus, fptag, i, nb_xmm_regs;
4317
    CPU86_LDouble tmp;
4318
    target_ulong addr;
4319

    
4320
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4321
    fptag = 0;
4322
    for(i = 0; i < 8; i++) {
4323
        fptag |= (env->fptags[i] << i);
4324
    }
4325
    stw(ptr, env->fpuc);
4326
    stw(ptr + 2, fpus);
4327
    stw(ptr + 4, fptag ^ 0xff);
4328
#ifdef TARGET_X86_64
4329
    if (data64) {
4330
        stq(ptr + 0x08, 0); /* rip */
4331
        stq(ptr + 0x10, 0); /* rdp */
4332
    } else 
4333
#endif
4334
    {
4335
        stl(ptr + 0x08, 0); /* eip */
4336
        stl(ptr + 0x0c, 0); /* sel  */
4337
        stl(ptr + 0x10, 0); /* dp */
4338
        stl(ptr + 0x14, 0); /* sel  */
4339
    }
4340

    
4341
    addr = ptr + 0x20;
4342
    for(i = 0;i < 8; i++) {
4343
        tmp = ST(i);
4344
        helper_fstt(tmp, addr);
4345
        addr += 16;
4346
    }
4347

    
4348
    if (env->cr[4] & CR4_OSFXSR_MASK) {
4349
        /* XXX: finish it */
4350
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4351
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4352
        if (env->hflags & HF_CS64_MASK)
4353
            nb_xmm_regs = 16;
4354
        else
4355
            nb_xmm_regs = 8;
4356
        addr = ptr + 0xa0;
4357
        /* Fast FXSAVE leaves out the XMM registers */
4358
        if (!(env->efer & MSR_EFER_FFXSR)
4359
          || (env->hflags & HF_CPL_MASK)
4360
          || !(env->hflags & HF_LMA_MASK)) {
4361
            for(i = 0; i < nb_xmm_regs; i++) {
4362
                stq(addr, env->xmm_regs[i].XMM_Q(0));
4363
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4364
                addr += 16;
4365
            }
4366
        }
4367
    }
4368
}
4369

    
4370
void helper_fxrstor(target_ulong ptr, int data64)
4371
{
4372
    int i, fpus, fptag, nb_xmm_regs;
4373
    CPU86_LDouble tmp;
4374
    target_ulong addr;
4375

    
4376
    env->fpuc = lduw(ptr);
4377
    fpus = lduw(ptr + 2);
4378
    fptag = lduw(ptr + 4);
4379
    env->fpstt = (fpus >> 11) & 7;
4380
    env->fpus = fpus & ~0x3800;
4381
    fptag ^= 0xff;
4382
    for(i = 0;i < 8; i++) {
4383
        env->fptags[i] = ((fptag >> i) & 1);
4384
    }
4385

    
4386
    addr = ptr + 0x20;
4387
    for(i = 0;i < 8; i++) {
4388
        tmp = helper_fldt(addr);
4389
        ST(i) = tmp;
4390
        addr += 16;
4391
    }
4392

    
4393
    if (env->cr[4] & CR4_OSFXSR_MASK) {
4394
        /* XXX: finish it */
4395
        env->mxcsr = ldl(ptr + 0x18);
4396
        //ldl(ptr + 0x1c);
4397
        if (env->hflags & HF_CS64_MASK)
4398
            nb_xmm_regs = 16;
4399
        else
4400
            nb_xmm_regs = 8;
4401
        addr = ptr + 0xa0;
4402
        /* Fast FXRESTORE leaves out the XMM registers */
4403
        if (!(env->efer & MSR_EFER_FFXSR)
4404
          || (env->hflags & HF_CPL_MASK)
4405
          || !(env->hflags & HF_LMA_MASK)) {
4406
            for(i = 0; i < nb_xmm_regs; i++) {
4407
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4408
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4409
                addr += 16;
4410
            }
4411
        }
4412
    }
4413
}
4414

    
4415
#ifndef USE_X86LDOUBLE
4416

    
4417
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4418
{
4419
    CPU86_LDoubleU temp;
4420
    int e;
4421

    
4422
    temp.d = f;
4423
    /* mantissa */
4424
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
4425
    /* exponent + sign */
4426
    e = EXPD(temp) - EXPBIAS + 16383;
4427
    e |= SIGND(temp) >> 16;
4428
    *pexp = e;
4429
}
4430

    
4431
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4432
{
4433
    CPU86_LDoubleU temp;
4434
    int e;
4435
    uint64_t ll;
4436

    
4437
    /* XXX: handle overflow ? */
4438
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4439
    e |= (upper >> 4) & 0x800; /* sign */
4440
    ll = (mant >> 11) & ((1LL << 52) - 1);
4441
#ifdef __arm__
4442
    temp.l.upper = (e << 20) | (ll >> 32);
4443
    temp.l.lower = ll;
4444
#else
4445
    temp.ll = ll | ((uint64_t)e << 52);
4446
#endif
4447
    return temp.d;
4448
}
4449

    
4450
#else
4451

    
4452
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4453
{
4454
    CPU86_LDoubleU temp;
4455

    
4456
    temp.d = f;
4457
    *pmant = temp.l.lower;
4458
    *pexp = temp.l.upper;
4459
}
4460

    
4461
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4462
{
4463
    CPU86_LDoubleU temp;
4464

    
4465
    temp.l.upper = upper;
4466
    temp.l.lower = mant;
4467
    return temp.d;
4468
}
4469
#endif
4470

    
4471
#ifdef TARGET_X86_64
4472

    
4473
//#define DEBUG_MULDIV
4474

    
4475
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4476
{
4477
    *plow += a;
4478
    /* carry test */
4479
    if (*plow < a)
4480
        (*phigh)++;
4481
    *phigh += b;
4482
}
4483

    
4484
static void neg128(uint64_t *plow, uint64_t *phigh)
4485
{
4486
    *plow = ~ *plow;
4487
    *phigh = ~ *phigh;
4488
    add128(plow, phigh, 1, 0);
4489
}
4490

    
4491
/* return TRUE if overflow */
4492
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4493
{
4494
    uint64_t q, r, a1, a0;
4495
    int i, qb, ab;
4496

    
4497
    a0 = *plow;
4498
    a1 = *phigh;
4499
    if (a1 == 0) {
4500
        q = a0 / b;
4501
        r = a0 % b;
4502
        *plow = q;
4503
        *phigh = r;
4504
    } else {
4505
        if (a1 >= b)
4506
            return 1;
4507
        /* XXX: use a better algorithm */
4508
        for(i = 0; i < 64; i++) {
4509
            ab = a1 >> 63;
4510
            a1 = (a1 << 1) | (a0 >> 63);
4511
            if (ab || a1 >= b) {
4512
                a1 -= b;
4513
                qb = 1;
4514
            } else {
4515
                qb = 0;
4516
            }
4517
            a0 = (a0 << 1) | qb;
4518
        }
4519
#if defined(DEBUG_MULDIV)
4520
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4521
               *phigh, *plow, b, a0, a1);
4522
#endif
4523
        *plow = a0;
4524
        *phigh = a1;
4525
    }
4526
    return 0;
4527
}
4528

    
4529
/* return TRUE if overflow */
4530
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4531
{
4532
    int sa, sb;
4533
    sa = ((int64_t)*phigh < 0);
4534
    if (sa)
4535
        neg128(plow, phigh);
4536
    sb = (b < 0);
4537
    if (sb)
4538
        b = -b;
4539
    if (div64(plow, phigh, b) != 0)
4540
        return 1;
4541
    if (sa ^ sb) {
4542
        if (*plow > (1ULL << 63))
4543
            return 1;
4544
        *plow = - *plow;
4545
    } else {
4546
        if (*plow >= (1ULL << 63))
4547
            return 1;
4548
    }
4549
    if (sa)
4550
        *phigh = - *phigh;
4551
    return 0;
4552
}
4553

    
4554
void helper_mulq_EAX_T0(target_ulong t0)
4555
{
4556
    uint64_t r0, r1;
4557

    
4558
    mulu64(&r0, &r1, EAX, t0);
4559
    EAX = r0;
4560
    EDX = r1;
4561
    CC_DST = r0;
4562
    CC_SRC = r1;
4563
}
4564

    
4565
void helper_imulq_EAX_T0(target_ulong t0)
4566
{
4567
    uint64_t r0, r1;
4568

    
4569
    muls64(&r0, &r1, EAX, t0);
4570
    EAX = r0;
4571
    EDX = r1;
4572
    CC_DST = r0;
4573
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4574
}
4575

    
4576
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4577
{
4578
    uint64_t r0, r1;
4579

    
4580
    muls64(&r0, &r1, t0, t1);
4581
    CC_DST = r0;
4582
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4583
    return r0;
4584
}
4585

    
4586
void helper_divq_EAX(target_ulong t0)
4587
{
4588
    uint64_t r0, r1;
4589
    if (t0 == 0) {
4590
        raise_exception(EXCP00_DIVZ);
4591
    }
4592
    r0 = EAX;
4593
    r1 = EDX;
4594
    if (div64(&r0, &r1, t0))
4595
        raise_exception(EXCP00_DIVZ);
4596
    EAX = r0;
4597
    EDX = r1;
4598
}
4599

    
4600
void helper_idivq_EAX(target_ulong t0)
4601
{
4602
    uint64_t r0, r1;
4603
    if (t0 == 0) {
4604
        raise_exception(EXCP00_DIVZ);
4605
    }
4606
    r0 = EAX;
4607
    r1 = EDX;
4608
    if (idiv64(&r0, &r1, t0))
4609
        raise_exception(EXCP00_DIVZ);
4610
    EAX = r0;
4611
    EDX = r1;
4612
}
4613
#endif
4614

    
4615
static void do_hlt(void)
4616
{
4617
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4618
    env->halted = 1;
4619
    env->exception_index = EXCP_HLT;
4620
    cpu_loop_exit();
4621
}
4622

    
4623
void helper_hlt(int next_eip_addend)
4624
{
4625
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4626
    EIP += next_eip_addend;
4627
    
4628
    do_hlt();
4629
}
4630

    
4631
void helper_monitor(target_ulong ptr)
4632
{
4633
    if ((uint32_t)ECX != 0)
4634
        raise_exception(EXCP0D_GPF);
4635
    /* XXX: store address ? */
4636
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4637
}
4638

    
4639
void helper_mwait(int next_eip_addend)
4640
{
4641
    if ((uint32_t)ECX != 0)
4642
        raise_exception(EXCP0D_GPF);
4643
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4644
    EIP += next_eip_addend;
4645

    
4646
    /* XXX: not complete but not completely erroneous */
4647
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
4648
        /* more than one CPU: do not sleep because another CPU may
4649
           wake this one */
4650
    } else {
4651
        do_hlt();
4652
    }
4653
}
4654

    
4655
void helper_debug(void)
4656
{
4657
    env->exception_index = EXCP_DEBUG;
4658
    cpu_loop_exit();
4659
}
4660

    
4661
void helper_raise_interrupt(int intno, int next_eip_addend)
4662
{
4663
    raise_interrupt(intno, 1, 0, next_eip_addend);
4664
}
4665

    
4666
void helper_raise_exception(int exception_index)
4667
{
4668
    raise_exception(exception_index);
4669
}
4670

    
4671
void helper_cli(void)
4672
{
4673
    env->eflags &= ~IF_MASK;
4674
}
4675

    
4676
void helper_sti(void)
4677
{
4678
    env->eflags |= IF_MASK;
4679
}
4680

    
4681
#if 0
4682
/* vm86plus instructions */
4683
void helper_cli_vm(void)
4684
{
4685
    env->eflags &= ~VIF_MASK;
4686
}
4687

4688
void helper_sti_vm(void)
4689
{
4690
    env->eflags |= VIF_MASK;
4691
    if (env->eflags & VIP_MASK) {
4692
        raise_exception(EXCP0D_GPF);
4693
    }
4694
}
4695
#endif
4696

    
4697
void helper_set_inhibit_irq(void)
4698
{
4699
    env->hflags |= HF_INHIBIT_IRQ_MASK;
4700
}
4701

    
4702
void helper_reset_inhibit_irq(void)
4703
{
4704
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4705
}
4706

    
4707
void helper_boundw(target_ulong a0, int v)
4708
{
4709
    int low, high;
4710
    low = ldsw(a0);
4711
    high = ldsw(a0 + 2);
4712
    v = (int16_t)v;
4713
    if (v < low || v > high) {
4714
        raise_exception(EXCP05_BOUND);
4715
    }
4716
}
4717

    
4718
void helper_boundl(target_ulong a0, int v)
4719
{
4720
    int low, high;
4721
    low = ldl(a0);
4722
    high = ldl(a0 + 4);
4723
    if (v < low || v > high) {
4724
        raise_exception(EXCP05_BOUND);
4725
    }
4726
}
4727

    
4728
static float approx_rsqrt(float a)
4729
{
4730
    return 1.0 / sqrt(a);
4731
}
4732

    
4733
static float approx_rcp(float a)
4734
{
4735
    return 1.0 / a;
4736
}
4737

    
4738
#if !defined(CONFIG_USER_ONLY)
4739

    
4740
#define MMUSUFFIX _mmu
4741

    
4742
#define SHIFT 0
4743
#include "softmmu_template.h"
4744

    
4745
#define SHIFT 1
4746
#include "softmmu_template.h"
4747

    
4748
#define SHIFT 2
4749
#include "softmmu_template.h"
4750

    
4751
#define SHIFT 3
4752
#include "softmmu_template.h"
4753

    
4754
#endif
4755

    
4756
#if !defined(CONFIG_USER_ONLY)
4757
/* try to fill the TLB and return an exception if error. If retaddr is
4758
   NULL, it means that the function was called in C code (i.e. not
4759
   from generated code or from helper.c) */
4760
/* XXX: fix it to restore all registers */
4761
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4762
{
4763
    TranslationBlock *tb;
4764
    int ret;
4765
    unsigned long pc;
4766
    CPUX86State *saved_env;
4767

    
4768
    /* XXX: hack to restore env in all cases, even if not called from
4769
       generated code */
4770
    saved_env = env;
4771
    env = cpu_single_env;
4772

    
4773
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4774
    if (ret) {
4775
        if (retaddr) {
4776
            /* now we have a real cpu fault */
4777
            pc = (unsigned long)retaddr;
4778
            tb = tb_find_pc(pc);
4779
            if (tb) {
4780
                /* the PC is inside the translated code. It means that we have
4781
                   a virtual CPU fault */
4782
                cpu_restore_state(tb, env, pc, NULL);
4783
            }
4784
        }
4785
        raise_exception_err(env->exception_index, env->error_code);
4786
    }
4787
    env = saved_env;
4788
}
4789
#endif
4790

    
4791
/* Secure Virtual Machine helpers */
4792

    
4793
#if defined(CONFIG_USER_ONLY)
4794

    
4795
void helper_vmrun(int aflag, int next_eip_addend)
4796
{ 
4797
}
4798
void helper_vmmcall(void) 
4799
{ 
4800
}
4801
void helper_vmload(int aflag)
4802
{ 
4803
}
4804
void helper_vmsave(int aflag)
4805
{ 
4806
}
4807
void helper_stgi(void)
4808
{
4809
}
4810
void helper_clgi(void)
4811
{
4812
}
4813
void helper_skinit(void) 
4814
{ 
4815
}
4816
void helper_invlpga(int aflag)
4817
{ 
4818
}
4819
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) 
4820
{ 
4821
}
4822
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4823
{
4824
}
4825

    
4826
void helper_svm_check_io(uint32_t port, uint32_t param, 
4827
                         uint32_t next_eip_addend)
4828
{
4829
}
4830
#else
4831

    
4832
static inline void svm_save_seg(target_phys_addr_t addr,
4833
                                const SegmentCache *sc)
4834
{
4835
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
4836
             sc->selector);
4837
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
4838
             sc->base);
4839
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
4840
             sc->limit);
4841
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
4842
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4843
}
4844
                                
4845
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4846
{
4847
    unsigned int flags;
4848

    
4849
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4850
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4851
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4852
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4853
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4854
}
4855

    
4856
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4857
                                      CPUState *env, int seg_reg)
4858
{
4859
    SegmentCache sc1, *sc = &sc1;
4860
    svm_load_seg(addr, sc);
4861
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4862
                           sc->base, sc->limit, sc->flags);
4863
}
4864

    
4865
void helper_vmrun(int aflag, int next_eip_addend)
4866
{
4867
    target_ulong addr;
4868
    uint32_t event_inj;
4869
    uint32_t int_ctl;
4870

    
4871
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4872

    
4873
    if (aflag == 2)
4874
        addr = EAX;
4875
    else
4876
        addr = (uint32_t)EAX;
4877

    
4878
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4879

    
4880
    env->vm_vmcb = addr;
4881

    
4882
    /* save the current CPU state in the hsave page */
4883
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4884
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4885

    
4886
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4887
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4888

    
4889
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4890
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4891
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4892
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4893
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4894
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4895

    
4896
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4897
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4898

    
4899
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4900
                  &env->segs[R_ES]);
4901
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4902
                 &env->segs[R_CS]);
4903
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4904
                 &env->segs[R_SS]);
4905
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4906
                 &env->segs[R_DS]);
4907

    
4908
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4909
             EIP + next_eip_addend);
4910
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4911
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4912

    
4913
    /* load the interception bitmaps so we do not need to access the
4914
       vmcb in svm mode */
4915
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4916
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4917
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4918
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4919
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4920
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4921

    
4922
    /* enable intercepts */
4923
    env->hflags |= HF_SVMI_MASK;
4924

    
4925
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4926

    
4927
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4928
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4929

    
4930
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4931
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4932

    
4933
    /* clear exit_info_2 so we behave like the real hardware */
4934
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4935

    
4936
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4937
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4938
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4939
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4940
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4941
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4942
    if (int_ctl & V_INTR_MASKING_MASK) {
4943
        env->v_tpr = int_ctl & V_TPR_MASK;
4944
        env->hflags2 |= HF2_VINTR_MASK;
4945
        if (env->eflags & IF_MASK)
4946
            env->hflags2 |= HF2_HIF_MASK;
4947
    }
4948

    
4949
    cpu_load_efer(env, 
4950
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4951
    env->eflags = 0;
4952
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4953
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4954
    CC_OP = CC_OP_EFLAGS;
4955

    
4956
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4957
                       env, R_ES);
4958
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4959
                       env, R_CS);
4960
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4961
                       env, R_SS);
4962
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4963
                       env, R_DS);
4964

    
4965
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4966
    env->eip = EIP;
4967
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4968
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4969
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4970
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4971
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4972

    
4973
    /* FIXME: guest state consistency checks */
4974

    
4975
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4976
        case TLB_CONTROL_DO_NOTHING:
4977
            break;
4978
        case TLB_CONTROL_FLUSH_ALL_ASID:
4979
            /* FIXME: this is not 100% correct but should work for now */
4980
            tlb_flush(env, 1);
4981
        break;
4982
    }
4983

    
4984
    env->hflags2 |= HF2_GIF_MASK;
4985

    
4986
    if (int_ctl & V_IRQ_MASK) {
4987
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4988
    }
4989

    
4990
    /* maybe we need to inject an event */
4991
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4992
    if (event_inj & SVM_EVTINJ_VALID) {
4993
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4994
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4995
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4996
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4997

    
4998
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4999
        /* FIXME: need to implement valid_err */
5000
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5001
        case SVM_EVTINJ_TYPE_INTR:
5002
                env->exception_index = vector;
5003
                env->error_code = event_inj_err;
5004
                env->exception_is_int = 0;
5005
                env->exception_next_eip = -1;
5006
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5007
                /* XXX: is it always correct ? */
5008
                do_interrupt(vector, 0, 0, 0, 1);
5009
                break;
5010
        case SVM_EVTINJ_TYPE_NMI:
5011
                env->exception_index = EXCP02_NMI;
5012
                env->error_code = event_inj_err;
5013
                env->exception_is_int = 0;
5014
                env->exception_next_eip = EIP;
5015
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5016
                cpu_loop_exit();
5017
                break;
5018
        case SVM_EVTINJ_TYPE_EXEPT:
5019
                env->exception_index = vector;
5020
                env->error_code = event_inj_err;
5021
                env->exception_is_int = 0;
5022
                env->exception_next_eip = -1;
5023
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5024
                cpu_loop_exit();
5025
                break;
5026
        case SVM_EVTINJ_TYPE_SOFT:
5027
                env->exception_index = vector;
5028
                env->error_code = event_inj_err;
5029
                env->exception_is_int = 1;
5030
                env->exception_next_eip = EIP;
5031
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5032
                cpu_loop_exit();
5033
                break;
5034
        }
5035
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5036
    }
5037
}
5038

    
5039
void helper_vmmcall(void)
5040
{
5041
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5042
    raise_exception(EXCP06_ILLOP);
5043
}
5044

    
5045
void helper_vmload(int aflag)
5046
{
5047
    target_ulong addr;
5048
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5049

    
5050
    if (aflag == 2)
5051
        addr = EAX;
5052
    else
5053
        addr = (uint32_t)EAX;
5054

    
5055
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5056
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5057
                env->segs[R_FS].base);
5058

    
5059
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5060
                       env, R_FS);
5061
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5062
                       env, R_GS);
5063
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5064
                 &env->tr);
5065
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5066
                 &env->ldt);
5067

    
5068
#ifdef TARGET_X86_64
5069
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5070
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5071
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5072
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5073
#endif
5074
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5075
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5076
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5077
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5078
}
5079

    
5080
void helper_vmsave(int aflag)
5081
{
5082
    target_ulong addr;
5083
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5084

    
5085
    if (aflag == 2)
5086
        addr = EAX;
5087
    else
5088
        addr = (uint32_t)EAX;
5089

    
5090
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5091
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5092
                env->segs[R_FS].base);
5093

    
5094
    svm_save_seg(addr + offsetof(struct vmcb, save.fs), 
5095
                 &env->segs[R_FS]);
5096
    svm_save_seg(addr + offsetof(struct vmcb, save.gs), 
5097
                 &env->segs[R_GS]);
5098
    svm_save_seg(addr + offsetof(struct vmcb, save.tr), 
5099
                 &env->tr);
5100
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), 
5101
                 &env->ldt);
5102

    
5103
#ifdef TARGET_X86_64
5104
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5105
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5106
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5107
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5108
#endif
5109
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5110
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5111
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5112
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5113
}
5114

    
5115
void helper_stgi(void)
5116
{
5117
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5118
    env->hflags2 |= HF2_GIF_MASK;
5119
}
5120

    
5121
void helper_clgi(void)
5122
{
5123
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5124
    env->hflags2 &= ~HF2_GIF_MASK;
5125
}
5126

    
5127
void helper_skinit(void)
5128
{
5129
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5130
    /* XXX: not implemented */
5131
    raise_exception(EXCP06_ILLOP);
5132
}
5133

    
5134
void helper_invlpga(int aflag)
5135
{
5136
    target_ulong addr;
5137
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5138
    
5139
    if (aflag == 2)
5140
        addr = EAX;
5141
    else
5142
        addr = (uint32_t)EAX;
5143

    
5144
    /* XXX: could use the ASID to see if it is needed to do the
5145
       flush */
5146
    tlb_flush_page(env, addr);
5147
}
5148

    
5149
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5150
{
5151
    if (likely(!(env->hflags & HF_SVMI_MASK)))
5152
        return;
5153
    switch(type) {
5154
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5155
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5156
            helper_vmexit(type, param);
5157
        }
5158
        break;
5159
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5160
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5161
            helper_vmexit(type, param);
5162
        }
5163
        break;
5164
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5165
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5166
            helper_vmexit(type, param);
5167
        }
5168
        break;
5169
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5170
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5171
            helper_vmexit(type, param);
5172
        }
5173
        break;
5174
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5175
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5176
            helper_vmexit(type, param);
5177
        }
5178
        break;
5179
    case SVM_EXIT_MSR:
5180
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5181
            /* FIXME: this should be read in at vmrun (faster this way?) */
5182
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5183
            uint32_t t0, t1;
5184
            switch((uint32_t)ECX) {
5185
            case 0 ... 0x1fff:
5186
                t0 = (ECX * 2) % 8;
5187
                t1 = ECX / 8;
5188
                break;
5189
            case 0xc0000000 ... 0xc0001fff:
5190
                t0 = (8192 + ECX - 0xc0000000) * 2;
5191
                t1 = (t0 / 8);
5192
                t0 %= 8;
5193
                break;
5194
            case 0xc0010000 ... 0xc0011fff:
5195
                t0 = (16384 + ECX - 0xc0010000) * 2;
5196
                t1 = (t0 / 8);
5197
                t0 %= 8;
5198
                break;
5199
            default:
5200
                helper_vmexit(type, param);
5201
                t0 = 0;
5202
                t1 = 0;
5203
                break;
5204
            }
5205
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
5206
                helper_vmexit(type, param);
5207
        }
5208
        break;
5209
    default:
5210
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5211
            helper_vmexit(type, param);
5212
        }
5213
        break;
5214
    }
5215
}
5216

    
5217
void helper_svm_check_io(uint32_t port, uint32_t param, 
5218
                         uint32_t next_eip_addend)
5219
{
5220
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5221
        /* FIXME: this should be read in at vmrun (faster this way?) */
5222
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5223
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5224
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5225
            /* next EIP */
5226
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
5227
                     env->eip + next_eip_addend);
5228
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5229
        }
5230
    }
5231
}
5232

    
5233
/* Note: currently only 32 bits of exit_code are used */
5234
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5235
{
5236
    uint32_t int_ctl;
5237

    
5238
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5239
                exit_code, exit_info_1,
5240
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5241
                EIP);
5242

    
5243
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5244
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5245
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5246
    } else {
5247
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5248
    }
5249

    
5250
    /* Save the VM state in the vmcb */
5251
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), 
5252
                 &env->segs[R_ES]);
5253
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), 
5254
                 &env->segs[R_CS]);
5255
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), 
5256
                 &env->segs[R_SS]);
5257
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), 
5258
                 &env->segs[R_DS]);
5259

    
5260
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5261
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5262

    
5263
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5264
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5265

    
5266
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5267
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5268
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5269
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5270
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5271

    
5272
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5273
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5274
    int_ctl |= env->v_tpr & V_TPR_MASK;
5275
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5276
        int_ctl |= V_IRQ_MASK;
5277
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5278

    
5279
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5280
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5281
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5282
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5283
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5284
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5285
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5286

    
5287
    /* Reload the host state from vm_hsave */
5288
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5289
    env->hflags &= ~HF_SVMI_MASK;
5290
    env->intercept = 0;
5291
    env->intercept_exceptions = 0;
5292
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5293
    env->tsc_offset = 0;
5294

    
5295
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5296
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5297

    
5298
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5299
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5300

    
5301
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5302
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5303
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5304
    /* we need to set the efer after the crs so the hidden flags get
5305
       set properly */
5306
    cpu_load_efer(env, 
5307
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5308
    env->eflags = 0;
5309
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5310
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5311
    CC_OP = CC_OP_EFLAGS;
5312

    
5313
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5314
                       env, R_ES);
5315
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5316
                       env, R_CS);
5317
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5318
                       env, R_SS);
5319
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5320
                       env, R_DS);
5321

    
5322
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5323
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5324
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5325

    
5326
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5327
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5328

    
5329
    /* other setups */
5330
    cpu_x86_set_cpl(env, 0);
5331
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5332
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5333

    
5334
    env->hflags2 &= ~HF2_GIF_MASK;
5335
    /* FIXME: Resets the current ASID register to zero (host ASID). */
5336

    
5337
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5338

    
5339
    /* Clears the TSC_OFFSET inside the processor. */
5340

    
5341
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
5342
       from the page table indicated the host's CR3. If the PDPEs contain
5343
       illegal state, the processor causes a shutdown. */
5344

    
5345
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5346
    env->cr[0] |= CR0_PE_MASK;
5347
    env->eflags &= ~VM_MASK;
5348

    
5349
    /* Disables all breakpoints in the host DR7 register. */
5350

    
5351
    /* Checks the reloaded host state for consistency. */
5352

    
5353
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5354
       host's code segment or non-canonical (in the case of long mode), a
5355
       #GP fault is delivered inside the host.) */
5356

    
5357
    /* remove any pending exception */
5358
    env->exception_index = -1;
5359
    env->error_code = 0;
5360
    env->old_exception = -1;
5361

    
5362
    cpu_loop_exit();
5363
}
5364

    
5365
#endif
5366

    
5367
/* MMX/SSE */
5368
/* XXX: optimize by storing fptt and fptags in the static cpu state */
5369
void helper_enter_mmx(void)
5370
{
5371
    env->fpstt = 0;
5372
    *(uint32_t *)(env->fptags) = 0;
5373
    *(uint32_t *)(env->fptags + 4) = 0;
5374
}
5375

    
5376
void helper_emms(void)
5377
{
5378
    /* set to empty state */
5379
    *(uint32_t *)(env->fptags) = 0x01010101;
5380
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
5381
}
5382

    
5383
/* XXX: suppress */
5384
void helper_movq(void *d, void *s)
5385
{
5386
    *(uint64_t *)d = *(uint64_t *)s;
5387
}
5388

    
5389
#define SHIFT 0
5390
#include "ops_sse.h"
5391

    
5392
#define SHIFT 1
5393
#include "ops_sse.h"
5394

    
5395
#define SHIFT 0
5396
#include "helper_template.h"
5397
#undef SHIFT
5398

    
5399
#define SHIFT 1
5400
#include "helper_template.h"
5401
#undef SHIFT
5402

    
5403
#define SHIFT 2
5404
#include "helper_template.h"
5405
#undef SHIFT
5406

    
5407
#ifdef TARGET_X86_64
5408

    
5409
#define SHIFT 3
5410
#include "helper_template.h"
5411
#undef SHIFT
5412

    
5413
#endif
5414

    
5415
/* bit operations */
5416
target_ulong helper_bsf(target_ulong t0)
5417
{
5418
    int count;
5419
    target_ulong res;
5420

    
5421
    res = t0;
5422
    count = 0;
5423
    while ((res & 1) == 0) {
5424
        count++;
5425
        res >>= 1;
5426
    }
5427
    return count;
5428
}
5429

    
5430
target_ulong helper_bsr(target_ulong t0)
5431
{
5432
    int count;
5433
    target_ulong res, mask;
5434
    
5435
    res = t0;
5436
    count = TARGET_LONG_BITS - 1;
5437
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5438
    while ((res & mask) == 0) {
5439
        count--;
5440
        res <<= 1;
5441
    }
5442
    return count;
5443
}
5444

    
5445

    
5446
static int compute_all_eflags(void)
5447
{
5448
    return CC_SRC;
5449
}
5450

    
5451
static int compute_c_eflags(void)
5452
{
5453
    return CC_SRC & CC_C;
5454
}
5455

    
5456
uint32_t helper_cc_compute_all(int op)
5457
{
5458
    switch (op) {
5459
    default: /* should never happen */ return 0;
5460

    
5461
    case CC_OP_EFLAGS: return compute_all_eflags();
5462

    
5463
    case CC_OP_MULB: return compute_all_mulb();
5464
    case CC_OP_MULW: return compute_all_mulw();
5465
    case CC_OP_MULL: return compute_all_mull();
5466

    
5467
    case CC_OP_ADDB: return compute_all_addb();
5468
    case CC_OP_ADDW: return compute_all_addw();
5469
    case CC_OP_ADDL: return compute_all_addl();
5470

    
5471
    case CC_OP_ADCB: return compute_all_adcb();
5472
    case CC_OP_ADCW: return compute_all_adcw();
5473
    case CC_OP_ADCL: return compute_all_adcl();
5474

    
5475
    case CC_OP_SUBB: return compute_all_subb();
5476
    case CC_OP_SUBW: return compute_all_subw();
5477
    case CC_OP_SUBL: return compute_all_subl();
5478

    
5479
    case CC_OP_SBBB: return compute_all_sbbb();
5480
    case CC_OP_SBBW: return compute_all_sbbw();
5481
    case CC_OP_SBBL: return compute_all_sbbl();
5482

    
5483
    case CC_OP_LOGICB: return compute_all_logicb();
5484
    case CC_OP_LOGICW: return compute_all_logicw();
5485
    case CC_OP_LOGICL: return compute_all_logicl();
5486

    
5487
    case CC_OP_INCB: return compute_all_incb();
5488
    case CC_OP_INCW: return compute_all_incw();
5489
    case CC_OP_INCL: return compute_all_incl();
5490

    
5491
    case CC_OP_DECB: return compute_all_decb();
5492
    case CC_OP_DECW: return compute_all_decw();
5493
    case CC_OP_DECL: return compute_all_decl();
5494

    
5495
    case CC_OP_SHLB: return compute_all_shlb();
5496
    case CC_OP_SHLW: return compute_all_shlw();
5497
    case CC_OP_SHLL: return compute_all_shll();
5498

    
5499
    case CC_OP_SARB: return compute_all_sarb();
5500
    case CC_OP_SARW: return compute_all_sarw();
5501
    case CC_OP_SARL: return compute_all_sarl();
5502

    
5503
#ifdef TARGET_X86_64
5504
    case CC_OP_MULQ: return compute_all_mulq();
5505

    
5506
    case CC_OP_ADDQ: return compute_all_addq();
5507

    
5508
    case CC_OP_ADCQ: return compute_all_adcq();
5509

    
5510
    case CC_OP_SUBQ: return compute_all_subq();
5511

    
5512
    case CC_OP_SBBQ: return compute_all_sbbq();
5513

    
5514
    case CC_OP_LOGICQ: return compute_all_logicq();
5515

    
5516
    case CC_OP_INCQ: return compute_all_incq();
5517

    
5518
    case CC_OP_DECQ: return compute_all_decq();
5519

    
5520
    case CC_OP_SHLQ: return compute_all_shlq();
5521

    
5522
    case CC_OP_SARQ: return compute_all_sarq();
5523
#endif
5524
    }
5525
}
5526

    
5527
uint32_t helper_cc_compute_c(int op)
5528
{
5529
    switch (op) {
5530
    default: /* should never happen */ return 0;
5531

    
5532
    case CC_OP_EFLAGS: return compute_c_eflags();
5533

    
5534
    case CC_OP_MULB: return compute_c_mull();
5535
    case CC_OP_MULW: return compute_c_mull();
5536
    case CC_OP_MULL: return compute_c_mull();
5537

    
5538
    case CC_OP_ADDB: return compute_c_addb();
5539
    case CC_OP_ADDW: return compute_c_addw();
5540
    case CC_OP_ADDL: return compute_c_addl();
5541

    
5542
    case CC_OP_ADCB: return compute_c_adcb();
5543
    case CC_OP_ADCW: return compute_c_adcw();
5544
    case CC_OP_ADCL: return compute_c_adcl();
5545

    
5546
    case CC_OP_SUBB: return compute_c_subb();
5547
    case CC_OP_SUBW: return compute_c_subw();
5548
    case CC_OP_SUBL: return compute_c_subl();
5549

    
5550
    case CC_OP_SBBB: return compute_c_sbbb();
5551
    case CC_OP_SBBW: return compute_c_sbbw();
5552
    case CC_OP_SBBL: return compute_c_sbbl();
5553

    
5554
    case CC_OP_LOGICB: return compute_c_logicb();
5555
    case CC_OP_LOGICW: return compute_c_logicw();
5556
    case CC_OP_LOGICL: return compute_c_logicl();
5557

    
5558
    case CC_OP_INCB: return compute_c_incl();
5559
    case CC_OP_INCW: return compute_c_incl();
5560
    case CC_OP_INCL: return compute_c_incl();
5561

    
5562
    case CC_OP_DECB: return compute_c_incl();
5563
    case CC_OP_DECW: return compute_c_incl();
5564
    case CC_OP_DECL: return compute_c_incl();
5565

    
5566
    case CC_OP_SHLB: return compute_c_shlb();
5567
    case CC_OP_SHLW: return compute_c_shlw();
5568
    case CC_OP_SHLL: return compute_c_shll();
5569

    
5570
    case CC_OP_SARB: return compute_c_sarl();
5571
    case CC_OP_SARW: return compute_c_sarl();
5572
    case CC_OP_SARL: return compute_c_sarl();
5573

    
5574
#ifdef TARGET_X86_64
5575
    case CC_OP_MULQ: return compute_c_mull();
5576

    
5577
    case CC_OP_ADDQ: return compute_c_addq();
5578

    
5579
    case CC_OP_ADCQ: return compute_c_adcq();
5580

    
5581
    case CC_OP_SUBQ: return compute_c_subq();
5582

    
5583
    case CC_OP_SBBQ: return compute_c_sbbq();
5584

    
5585
    case CC_OP_LOGICQ: return compute_c_logicq();
5586

    
5587
    case CC_OP_INCQ: return compute_c_incl();
5588

    
5589
    case CC_OP_DECQ: return compute_c_incl();
5590

    
5591
    case CC_OP_SHLQ: return compute_c_shlq();
5592

    
5593
    case CC_OP_SARQ: return compute_c_sarl();
5594
#endif
5595
    }
5596
}