/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
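
/* The parity flag (PF) only depends on the low byte of a result: it is
   set when that byte contains an even number of one bits, so a
   256-entry table indexed by the byte is sufficient. */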
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
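
/* Constants pushed by the x87 constant-load instructions, in array
   order: FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T. */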
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if it is a task gate, we read and load the TSS segment */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
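
    /* an available/busy 32-bit TSS (type bit 3 set) is at least 104
       bytes long and a 16-bit TSS at least 44 bytes, hence the minimum
       limits checked below */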
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without raising an exception, then reload
       them, possibly raising exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is within the CS segment limit */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
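/* The 32-bit TSS keeps the I/O permission bitmap offset at 0x66; each
   bit of the bitmap covers one port, and every bit spanned by the
   access must be clear for the access to be allowed. */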
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
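
/* exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC)
   push an error code on the stack */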
static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
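
/* A 64-bit store must write all of RSP, a 32-bit store zero-extends
   into the upper half, and a 16-bit store leaves the upper bits
   untouched, hence the three cases below. */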
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit machines, this can overflow, so this segment addition macro
 * can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
657
static void do_interrupt_protected(int intno, int is_int, int error_code,
658
                                   unsigned int next_eip, int is_hw)
659
{
660
    SegmentCache *dt;
661
    target_ulong ptr, ssp;
662
    int type, dpl, selector, ss_dpl, cpl;
663
    int has_error_code, new_stack, shift;
664
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
665
    uint32_t old_eip, sp_mask;
666

    
667
    has_error_code = 0;
668
    if (!is_int && !is_hw)
669
        has_error_code = exeption_has_error_code(intno);
670
    if (is_int)
671
        old_eip = next_eip;
672
    else
673
        old_eip = env->eip;
674

    
675
    dt = &env->idt;
676
    if (intno * 8 + 7 > dt->limit)
677
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
678
    ptr = dt->base + intno * 8;
679
    e1 = ldl_kernel(ptr);
680
    e2 = ldl_kernel(ptr + 4);
681
    /* check gate type */
682
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
683
    switch(type) {
684
    case 5: /* task gate */
685
        /* must do that check here to return the correct error code */
686
        if (!(e2 & DESC_P_MASK))
687
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
688
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
689
        if (has_error_code) {
690
            int type;
691
            uint32_t mask;
692
            /* push the error code */
693
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
694
            shift = type >> 3;
695
            if (env->segs[R_SS].flags & DESC_B_MASK)
696
                mask = 0xffffffff;
697
            else
698
                mask = 0xffff;
699
            esp = (ESP - (2 << shift)) & mask;
700
            ssp = env->segs[R_SS].base + esp;
701
            if (shift)
702
                stl_kernel(ssp, error_code);
703
            else
704
                stw_kernel(ssp, error_code);
705
            SET_ESP(esp, mask);
706
        }
707
        return;
708
    case 6: /* 286 interrupt gate */
709
    case 7: /* 286 trap gate */
710
    case 14: /* 386 interrupt gate */
711
    case 15: /* 386 trap gate */
712
        break;
713
    default:
714
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
715
        break;
716
    }
717
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
718
    cpl = env->hflags & HF_CPL_MASK;
719
    /* check privilege if software int */
720
    if (is_int && dpl < cpl)
721
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
722
    /* check valid bit */
723
    if (!(e2 & DESC_P_MASK))
724
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
725
    selector = e1 >> 16;
726
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
727
    if ((selector & 0xfffc) == 0)
728
        raise_exception_err(EXCP0D_GPF, 0);
729

    
730
    if (load_segment(&e1, &e2, selector) != 0)
731
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
732
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
733
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
734
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
735
    if (dpl > cpl)
736
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
737
    if (!(e2 & DESC_P_MASK))
738
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
739
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
740
        /* to inner privilege */
741
        get_ss_esp_from_tss(&ss, &esp, dpl);
742
        if ((ss & 0xfffc) == 0)
743
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744
        if ((ss & 3) != dpl)
745
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
746
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
747
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
748
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
749
        if (ss_dpl != dpl)
750
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751
        if (!(ss_e2 & DESC_S_MASK) ||
752
            (ss_e2 & DESC_CS_MASK) ||
753
            !(ss_e2 & DESC_W_MASK))
754
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
755
        if (!(ss_e2 & DESC_P_MASK))
756
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
757
        new_stack = 1;
758
        sp_mask = get_sp_mask(ss_e2);
759
        ssp = get_seg_base(ss_e1, ss_e2);
760
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
761
        /* to same privilege */
762
        if (env->eflags & VM_MASK)
763
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
764
        new_stack = 0;
765
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
766
        ssp = env->segs[R_SS].base;
767
        esp = ESP;
768
        dpl = cpl;
769
    } else {
770
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
771
        new_stack = 0; /* avoid warning */
772
        sp_mask = 0; /* avoid warning */
773
        ssp = 0; /* avoid warning */
774
        esp = 0; /* avoid warning */
775
    }
776

    
777
    shift = type >> 3;
778

    
779
#if 0
780
    /* XXX: check that enough room is available */
781
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
782
    if (env->eflags & VM_MASK)
783
        push_size += 8;
784
    push_size <<= shift;
785
#endif
786
    if (shift == 1) {
787
        if (new_stack) {
788
            if (env->eflags & VM_MASK) {
789
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
790
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
791
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
792
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
793
            }
794
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
795
            PUSHL(ssp, esp, sp_mask, ESP);
796
        }
797
        PUSHL(ssp, esp, sp_mask, compute_eflags());
798
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
799
        PUSHL(ssp, esp, sp_mask, old_eip);
800
        if (has_error_code) {
801
            PUSHL(ssp, esp, sp_mask, error_code);
802
        }
803
    } else {
804
        if (new_stack) {
805
            if (env->eflags & VM_MASK) {
806
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
807
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
808
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
809
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
810
            }
811
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
812
            PUSHW(ssp, esp, sp_mask, ESP);
813
        }
814
        PUSHW(ssp, esp, sp_mask, compute_eflags());
815
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
816
        PUSHW(ssp, esp, sp_mask, old_eip);
817
        if (has_error_code) {
818
            PUSHW(ssp, esp, sp_mask, error_code);
819
        }
820
    }
821

    
822
    if (new_stack) {
823
        if (env->eflags & VM_MASK) {
824
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
825
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
826
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
827
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
828
        }
829
        ss = (ss & ~3) | dpl;
830
        cpu_x86_load_seg_cache(env, R_SS, ss,
831
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
832
    }
833
    SET_ESP(esp, sp_mask);
834

    
835
    selector = (selector & ~3) | dpl;
836
    cpu_x86_load_seg_cache(env, R_CS, selector,
837
                   get_seg_base(e1, e2),
838
                   get_seg_limit(e1, e2),
839
                   e2);
840
    cpu_x86_set_cpl(env, dpl);
841
    env->eip = offset;
842

    
    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

    
864
static inline target_ulong get_rsp_from_tss(int level)
865
{
866
    int index;
867

    
868
#if 0
869
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
870
           env->tr.base, env->tr.limit);
871
#endif
872

    
873
    if (!(env->tr.flags & DESC_P_MASK))
874
        cpu_abort(env, "invalid tss");
875
    index = 8 * level + 4;
876
    if ((index + 7) > env->tr.limit)
877
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
878
    return ldq_kernel(env->tr.base + index);
879
}
880

    
881
/* 64 bit interrupt */
882
static void do_interrupt64(int intno, int is_int, int error_code,
883
                           target_ulong next_eip, int is_hw)
884
{
885
    SegmentCache *dt;
886
    target_ulong ptr;
887
    int type, dpl, selector, cpl, ist;
888
    int has_error_code, new_stack;
889
    uint32_t e1, e2, e3, ss;
890
    target_ulong old_eip, esp, offset;
891

    
892
    has_error_code = 0;
893
    if (!is_int && !is_hw)
894
        has_error_code = exeption_has_error_code(intno);
895
    if (is_int)
896
        old_eip = next_eip;
897
    else
898
        old_eip = env->eip;
899

    
900
    dt = &env->idt;
901
    if (intno * 16 + 15 > dt->limit)
902
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
903
    ptr = dt->base + intno * 16;
904
    e1 = ldl_kernel(ptr);
905
    e2 = ldl_kernel(ptr + 4);
906
    e3 = ldl_kernel(ptr + 8);
907
    /* check gate type */
908
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
909
    switch(type) {
910
    case 14: /* 386 interrupt gate */
911
    case 15: /* 386 trap gate */
912
        break;
913
    default:
914
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
915
        break;
916
    }
917
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
918
    cpl = env->hflags & HF_CPL_MASK;
919
    /* check privilege if software int */
920
    if (is_int && dpl < cpl)
921
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
922
    /* check valid bit */
923
    if (!(e2 & DESC_P_MASK))
924
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
925
    selector = e1 >> 16;
926
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
927
    ist = e2 & 7;
928
    if ((selector & 0xfffc) == 0)
929
        raise_exception_err(EXCP0D_GPF, 0);
930

    
931
    if (load_segment(&e1, &e2, selector) != 0)
932
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
933
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
934
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
935
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
936
    if (dpl > cpl)
937
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938
    if (!(e2 & DESC_P_MASK))
939
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
940
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
941
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
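    /* a non-zero IST always forces a stack switch; ISTn corresponds to
       level n + 3 in get_rsp_from_tss() */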
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
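/* SYSCALL loads CS from MSR_STAR[47:32] (SS is the next descriptor); in
   long mode RFLAGS is masked with the SFMASK MSR (env->fmask) and RIP
   comes from LSTAR or CSTAR, while legacy mode jumps to STAR[31:0]. */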
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
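/* in real mode the IDT is the interrupt vector table: 4 bytes per
   vector, with the handler offset at +0 and its segment at +2 */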
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we only emulate user space, we cannot do more than exit
       the emulation with the suitable exception and error code */
    if (is_int)
        EIP = next_eip;
}
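
/* While the guest runs with SVM interception enabled (HF_SVMI_MASK),
   delivered events are recorded in the VMCB event_inj field;
   do_interrupt() clears the valid bit again once the event has been
   taken. */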
static void handle_even_inj(int intno, int is_int, int error_code,
                int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
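
/* bit 17 (0x20000) of the revision ID advertises SMBASE relocation;
   helper_rsm() checks it before reloading env->smbase */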
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1507

    
1508
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

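/* Note: bit 17 (0x20000) of the saved revision ID advertises SMBASE
   relocation; when it is set, RSM reloads SMBASE from the save area,
   and the ~0x7fff mask keeps the new base 32 KB aligned. */
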
#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

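/* Worked example for helper_divb_AL: with AX = 0x0105 (261) and a
   divisor of 2, q = 130 (0x82) and r = 1, so AX becomes 0x0182
   (AH = remainder, AL = quotient). As on real hardware, a quotient
   above 0xff is folded into the same #DE fault as division by zero. */
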
/* bcd */

/* XXX: missing #DE when the immediate base is 0 */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

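/* e.g. AAM with the default base 10 and AL = 0x3f (63) yields
   AH = 6, AL = 3, the unpacked BCD digits of 63. */
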
void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

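/* e.g. after ADD AL with 0x45 + 0x55 = 0x9a (BCD 45 + 55), DAA sees
   (al & 0x0f) > 9 and adjusts to 0xa0 with AF set, then al > 0x9f
   forces the +0x60 step, leaving AL = 0x00 with CF set: 100 in
   packed BCD with a carry out. */
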
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

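/* Note: the failure path stores the old value back on purpose:
   CMPXCHG8B always performs the write cycle, so emulating the store
   preserves the fault behaviour of the real instruction. */
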
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

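/* Note: this helper only implements the nesting levels of ENTER; it
   copies level-1 frame pointers from the old frame and then pushes t1,
   the new frame pointer. The initial push of EBP and the final EBP/ESP
   updates are handled by the generated code. */
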
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

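/* Note: in long mode, LDT and TSS descriptors are 16 bytes (hence
   entry_limit = 15) and the third descriptor word supplies bits 63:32
   of the base, which is why e3 is merged in above. */
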
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works in protected mode and outside VM86; seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

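/* Note: a null selector is accepted for data segments, and in 64-bit
   mode even for SS when CPL != 3; only the cached descriptor is
   nullified, and a fault occurs when the segment is actually used. */
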
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

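/* Note: for a call through a gate to a more privileged level,
   param_count stack words (bits 4:0 of the gate descriptor) are copied
   from the caller's stack to the new inner stack before the caller's
   CS:EIP is pushed. */
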
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

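/* Note: which EFLAGS bits IRET may change depends on the old CPL:
   IOPL is writable only at CPL 0, IF only when CPL <= IOPL, and a
   16-bit IRET leaves the upper flag word untouched. */
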
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

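/* Note: SYSENTER/SYSEXIT never consult the GDT: CS and SS are loaded
   with fixed flat 4GB descriptors derived from SYSENTER_CS (SS = CS +
   8 on entry; CS = base + 16/32 and SS = base + 24/40 on exit, for
   legacy and 64-bit mode respectively). */
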
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only the 4 lower bits of CR0 are modified; PE cannot be
       cleared once set */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

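/* e.g. with CR0.PE already set, lmsw(0) computes
   (cr0 & ~0xe) | (0 & 0xf), which keeps bit 0: LMSW can set PE but
   never clear it, so leaving protected mode requires MOV to CR0. */
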
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

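/* Note: update_mask whitelists the EFER bits a WRMSR may change; each
   bit is writable only when the matching CPUID feature (SYSCALL, LM,
   FFXSR, NXE, SVM) is advertised, and all other bits keep their old
   value. */
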
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef CONFIG_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

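/* The LSL/LAR/VERR/VERW helpers below share one pattern: they walk the
   descriptor table without faulting, clear ZF in the saved flags on any
   selector, type or privilege failure, and set ZF on success.  Example:
   at CPL 3, LSL on a selector for a DPL-0 non-conforming data segment
   fails the dpl < cpl test and just leaves ZF clear. */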
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

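/* When a store does not fit the destination, the x87 writes the "integer
   indefinite" value; for the 16-bit forms below that is -32768 (0x8000).
   Example: FIST with ST0 = 1e6 stores 0x8000 instead of a truncated bit
   pattern.  The 32/64-bit variants rely on the softfloat conversion
   routines to produce their indefinite value. */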
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

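/* ST(i) addresses the register stack relative to the current top,
   i.e. env->fpregs[(env->fpstt + i) & 7].d, so the helpers below write
   their result into a physical register that depends on fpstt. */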
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

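/* The constant-load helpers above index f15rk[] (defined at the top of
   this file): 0 = +0.0, 1 = 1.0, 2 = pi, 3 = log10(2), 4 = ln(2),
   5 = log2(e), 6 = log2(10). */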
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

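/* Packed BCD, as used by FBLD/FBSTP below: 18 decimal digits, two per
   byte with the low digit in the low nibble, least-significant byte
   first; byte 9 holds the sign in bit 7.  Example: +1234 is encoded as
   34 12 00 00 00 00 00 00 00 00. */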
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

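/* The transcendental helpers below lean on the host libm (pow, log, tan,
   atan2, sin, cos), so results may differ from real x87 hardware in the
   last bits, and arguments outside +/-MAXTAN (2**63) are not range
   reduced: the helpers simply set C2 and leave the operand alone. */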
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

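/* FSTENV/FLDENV use the legacy environment image: 28 bytes in 32-bit
   mode (control, status and tag words each widened to 32 bits, plus
   instruction/operand pointer slots that are left as zero here), 14
   bytes in 16-bit mode.  FSAVE/FRSTOR append the eight 80-bit registers
   after that header, hence the "ptr += 14 << data32" below. */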
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

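/* FXSAVE uses the fixed 512-byte layout: FPU control/status/tag words at
   offset 0, MXCSR at 0x18, the eight x87 registers in 16-byte slots from
   0x20, and the XMM registers from 0xa0 (16 of them when CS is 64-bit,
   8 otherwise).  The tag word is stored compacted, one valid bit per
   register, inverted (fptag ^ 0xff). */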
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE

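/* When the host has no usable 80-bit long double, guest values live in
   host doubles and cpu_get_fp80()/cpu_set_fp80() translate between the
   64-bit format (52-bit mantissa, bias 1023) and the 80-bit one
   (explicit integer bit, bias 16383); the 11 low mantissa bits are lost
   in the round trip. */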
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

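/* div64() below is a bit-serial 128/64 divider: each of the 64
   iterations shifts the remainder/dividend pair (a1:a0) left one bit and
   subtracts b when it fits, collecting quotient bits in a0.  Overflow
   (a quotient wider than 64 bits) is flagged up front when the high half
   is already >= b. */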
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

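/* 64-bit MUL/IMUL produce a 128-bit result in RDX:RAX, and DIV/IDIV take
   their 128-bit dividend from the same pair.  Example: MUL with
   RAX = 2**63 and t0 = 2 leaves RAX = 0 and RDX = 1. */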
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

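/* HLT parks the virtual CPU: do_hlt() records EXCP_HLT and leaves the
   execution loop, and the CPU is woken again when an interrupt becomes
   pending.  EIP is advanced before halting so the instruction is not
   re-executed on wakeup. */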
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

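/* RSQRTSS/RCPSS are architecturally approximate instructions (real
   hardware only guarantees a relative error of about 2**-12); emulating
   them with full-precision host sqrt and division is therefore a safe,
   if slightly too accurate, substitute. */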
static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

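/* softmmu_template.h expands once per SHIFT (0..3, i.e. 1/2/4/8-byte
   accesses) into the out-of-line load/store slow paths; on a TLB miss
   they end up in tlb_fill() below. */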
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

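/* In user-mode emulation there is no system state to virtualize, so the
   SVM helpers compile to empty stubs; the real implementations follow in
   the #else branch. */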
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

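/* The VMCB stores segment attributes in a packed 12-bit form: attrib
   bits 0-7 are descriptor bits 8-15, and attrib bits 8-11 are descriptor
   bits 20-23.  The shift/mask pairs below convert in both directions. */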
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

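/* VMRUN in outline: save host state to the page named by
   MSR_VM_HSAVE_PA, cache the intercept bitmaps out of the guest VMCB (so
   intercept checks never touch guest memory), load the guest state, and
   finally inject the event described by control.event_inj, if any. */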
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

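/* VMLOAD/VMSAVE move only the "extra" context between the CPU and the
   VMCB addressed by rAX: FS, GS, TR and LDTR, KernelGSBase, the
   STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs.  The rest
   of the guest state travels with VMRUN/#VMEXIT. */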
void helper_vmload(int aflag)
5067
{
5068
    target_ulong addr;
5069
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5070

    
5071
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

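/* VMSAVE is the inverse of VMLOAD: it stores the same registers back
   into the VMCB at physical address rAX */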
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

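/* STGI/CLGI set and clear the global interrupt flag; while GIF is
   clear the CPU takes no interrupts at all */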
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

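/* take a #VMEXIT if the intercept bit matching 'type' is enabled in
   the VMCB controls; 'param' becomes the exit_info_1 value */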
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
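    /* the MSR permission map holds two intercept bits per MSR (read,
       then write); t1 is the byte offset into the map, t0 the bit
       offset inside that byte, and 'param' selects the read (0) or
       write (1) bit of the pair */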
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

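/* the I/O permission map holds one intercept bit per port; bits 6:4 of
   'param' give the access size in bytes, and every byte of a multi-byte
   access must be checked, hence the size-dependent mask */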
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

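    /* record whether the guest was executing in an interrupt shadow so
       the host finds it in the VMCB interrupt state */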
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

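    /* fold the current virtual TPR and any still-pending virtual
       interrupt back into the VMCB int_ctl field */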
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

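    /* expose the event that was being injected (if any) at the time of
       the exit, so the host can see and re-inject it */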
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
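/* an MMX instruction resets the FP top-of-stack and tags all eight
   FP/MMX registers as valid (tag value 0) */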
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

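/* EMMS tags all eight registers as empty again (tag value 1) */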
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

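/* ops_sse.h is a size template: SHIFT 0 instantiates the 64-bit MMX
   variants of the vector helpers, SHIFT 1 the 128-bit SSE variants */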
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

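/* helper_template.h likewise expands once per operand size: SHIFT
   0/1/2/3 select 8/16/32/64-bit operands for the templated helpers,
   among them the compute_all_*/compute_c_* flag functions used below */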
#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
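/* note: the translated code is expected to invoke these helpers only
   with a nonzero operand (BSF/BSR with a zero source leave the
   destination undefined, a case the translator handles itself);
   a zero operand would make the loops below spin forever */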
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

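/* lazy condition codes: CC_OP remembers which operation last set the
   flags and CC_SRC/CC_DST hold its operands, so the helpers below can
   reconstruct EFLAGS (or just CF) only when a flag is actually read */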
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

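/* only CF is needed here, so several CC_OP values can share one helper:
   e.g. INC/DEC never change CF, and the translator keeps the previous
   carry in CC_SRC, which compute_c_incl simply returns */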
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}