/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) do {            \
     if (loglevel & CPU_LOG_PCALL)       \
       fprintf(logfile, ## __VA_ARGS__); \
   } while (0)
#  define LOG_PCALL_STATE(env) do {                             \
    if (loglevel & CPU_LOG_PCALL)                               \
        cpu_dump_state((env), logfile, fprintf, X86_DUMP_CCOP); \
   } while (0)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

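/* PF reflects only the low 8 bits of a result: it is set when that byte
   contains an even number of 1 bits.  The table below is indexed by the
   result byte and yields CC_P or 0 directly; e.g. index 0x05 (bits 0 and
   2 set, an even count) yields CC_P, while 0x04 (one bit set) yields 0. */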
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

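/* RCL rotates through CF, so an N-bit rotate cycles among N+1 bit
   positions; the shift count is therefore reduced modulo 9 for byte
   operands and modulo 17 for word operands, which these tables encode. */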
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

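/* The x87 constants pushed by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E
   and FLDL2T; presumably the translator passes an index into this table. */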
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

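/* A segment descriptor is handled as two 32-bit words: e1 holds
   limit[15:0] and base[15:0], e2 holds base[23:16], the type/S/DPL/P
   byte, limit[19:16], the AVL/L/D-B/G bits and base[31:24].  The
   helpers below reassemble the scattered base and limit fields; when
   the G bit is set the limit is in 4K pages, so it is scaled by 4096
   with the low 12 bits forced to 1. */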
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

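/* In a 32-bit TSS the privilege-level stack pointers live near the
   start: ESP0/SS0 at offsets 0x04/0x08, ESP1/SS1 at 0x0c/0x10, ESP2/SS2
   at 0x14/0x18.  A 16-bit TSS packs SP/SS pairs into 4 bytes starting
   at offset 2.  The expression (dpl * 4 + 2) << shift below covers both
   layouts. */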
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

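/* Hardware task switch, as triggered by a far JMP/CALL to a TSS or task
   gate, an IRET with NT set, or an exception through a task gate.  The
   sequence below mirrors the architectural one: read the incoming TSS,
   save the outgoing context into the current TSS, transfer the busy bit,
   install TR, then load CR3, the general registers, the LDT and finally
   the segment registers from the new TSS. */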
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first load just the selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

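/* Port I/O permission is governed by the bitmap in the 32-bit TSS: the
   16-bit word at offset 0x66 gives the bitmap's offset within the TSS,
   and each bit maps one port (bit set = access denied).  For example,
   port 0x3f8 is bit 0 of the byte at bitmap offset 0x7f.  A 16-bit word
   is read so that accesses spanning a byte boundary are checked too. */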
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

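/* The stack helpers below abstract over 16- vs 32-bit stack segments:
   the D/B bit of SS selects a 0xffff or 0xffffffff stack-pointer mask,
   and SET_ESP writes back only the masked low bits so the untouched
   high bits of ESP/RSP are preserved. */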
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

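/* Protected-mode interrupt/exception entry: fetch the gate from the IDT,
   check gate and target-code-segment privilege, optionally switch to the
   inner-ring stack taken from the TSS, then push (on a stack switch) old
   SS:ESP, followed by EFLAGS, CS, EIP and finally the error code if the
   vector carries one.  Interrupt gates additionally clear IF; both gate
   kinds clear TF, VM, RF and NT. */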
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

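/* In long mode IDT entries are 16 bytes and carry a 64-bit offset plus a
   3-bit IST index; a non-zero IST unconditionally selects one of the
   seven alternate stacks stored in the 64-bit TSS (IST1 at offset 0x24,
   which is why get_rsp_from_tss is called with ist + 3 below).  SS:RSP
   is always pushed and the new RSP is aligned down to 16 bytes. */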
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

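/* SYSCALL takes its targets from MSRs: CS comes from STAR[47:32] with
   SS = CS + 8.  In long mode the return RIP is saved in RCX and RFLAGS
   in R11, RFLAGS is masked by SFMASK (env->fmask), and RIP is loaded
   from LSTAR (or CSTAR for a 32-bit caller).  In legacy mode EIP comes
   from STAR[31:0] and only IF/RF/VM are cleared. */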
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

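/* SYSRET is the inverse: CS comes from STAR[63:48] (plus 16, with the L
   bit set, when returning to 64-bit code), SS = STAR[63:48] + 8, CPL is
   forced to 3, and in long mode RFLAGS is restored from R11 while RIP
   comes from RCX.  It raises #UD if SCE is clear, and #GP if the CPU is
   not in protected mode or CPL != 0. */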
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

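/* In real mode the IDT is the interrupt vector table: 4-byte entries
   holding offset then segment.  Entry is essentially a far call with
   FLAGS pushed first, so the 16-bit frame is FLAGS, CS, IP; IF and TF
   (plus AC and RF here) are cleared and no privilege checks apply. */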
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

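/* Exception classing for double faults: vectors 0 (#DE) and 10-13
   (#TS, #NP, #SS, #GP) are "contributory".  A contributory exception
   raised while delivering another contributory one, or most faults
   raised while delivering a page fault, escalate to #DF (vector 8);
   a further fault during #DF delivery is a triple fault, handled here
   by aborting. */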
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void noreturn raise_interrupt(int intno, int is_int, int error_code,
                                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

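/* On an SMI the CPU saves its state to the SMRAM save area starting at
   SMBASE + 0x8000 (the state map occupies 0x7e00-0x7fff within it) and
   resumes execution at SMBASE + 0x8000 in a real-mode-like environment
   with 4 GB segment limits.  RSM restores the saved state; bit 17 of
   the revision ID (the 0x20000 test below) advertises SMBASE
   relocation, letting the handler move SMBASE before returning. */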
#if defined(CONFIG_USER_ONLY)
1332

    
1333
void do_smm_enter(void)
1334
{
1335
}
1336

    
1337
void helper_rsm(void)
1338
{
1339
}
1340

    
1341
#else
1342

    
1343
#ifdef TARGET_X86_64
1344
#define SMM_REVISION_ID 0x00020064
1345
#else
1346
#define SMM_REVISION_ID 0x00020000
1347
#endif
1348

    
1349
void do_smm_enter(void)
1350
{
1351
    target_ulong sm_state;
1352
    SegmentCache *dt;
1353
    int i, offset;
1354

    
1355
    if (loglevel & CPU_LOG_INT) {
1356
        fprintf(logfile, "SMM: enter\n");
1357
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1358
    }
1359

    
1360
    env->hflags |= HF_SMM_MASK;
1361
    cpu_smm_update(env);
1362

    
1363
    sm_state = env->smbase + 0x8000;
1364

    
1365
#ifdef TARGET_X86_64
1366
    for(i = 0; i < 6; i++) {
1367
        dt = &env->segs[i];
1368
        offset = 0x7e00 + i * 16;
1369
        stw_phys(sm_state + offset, dt->selector);
1370
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1371
        stl_phys(sm_state + offset + 4, dt->limit);
1372
        stq_phys(sm_state + offset + 8, dt->base);
1373
    }
1374

    
1375
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1376
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1377

    
1378
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1379
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1380
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1381
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1382

    
1383
    stq_phys(sm_state + 0x7e88, env->idt.base);
1384
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1385

    
1386
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1387
    stq_phys(sm_state + 0x7e98, env->tr.base);
1388
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1389
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1390

    
1391
    stq_phys(sm_state + 0x7ed0, env->efer);
1392

    
1393
    stq_phys(sm_state + 0x7ff8, EAX);
1394
    stq_phys(sm_state + 0x7ff0, ECX);
1395
    stq_phys(sm_state + 0x7fe8, EDX);
1396
    stq_phys(sm_state + 0x7fe0, EBX);
1397
    stq_phys(sm_state + 0x7fd8, ESP);
1398
    stq_phys(sm_state + 0x7fd0, EBP);
1399
    stq_phys(sm_state + 0x7fc8, ESI);
1400
    stq_phys(sm_state + 0x7fc0, EDI);
1401
    for(i = 8; i < 16; i++)
1402
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1403
    stq_phys(sm_state + 0x7f78, env->eip);
1404
    stl_phys(sm_state + 0x7f70, compute_eflags());
1405
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1406
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1407

    
1408
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1409
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1410
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1411

    
1412
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1413
    stl_phys(sm_state + 0x7f00, env->smbase);
1414
#else
1415
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1416
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1417
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1418
    stl_phys(sm_state + 0x7ff0, env->eip);
1419
    stl_phys(sm_state + 0x7fec, EDI);
1420
    stl_phys(sm_state + 0x7fe8, ESI);
1421
    stl_phys(sm_state + 0x7fe4, EBP);
1422
    stl_phys(sm_state + 0x7fe0, ESP);
1423
    stl_phys(sm_state + 0x7fdc, EBX);
1424
    stl_phys(sm_state + 0x7fd8, EDX);
1425
    stl_phys(sm_state + 0x7fd4, ECX);
1426
    stl_phys(sm_state + 0x7fd0, EAX);
1427
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1428
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1429

    
1430
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1431
    stl_phys(sm_state + 0x7f64, env->tr.base);
1432
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1433
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1434

    
1435
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1436
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1437
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1438
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1439

    
1440
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1441
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1442

    
1443
    stl_phys(sm_state + 0x7f58, env->idt.base);
1444
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1445

    
1446
    for(i = 0; i < 6; i++) {
1447
        dt = &env->segs[i];
1448
        if (i < 3)
1449
            offset = 0x7f84 + i * 12;
1450
        else
1451
            offset = 0x7f2c + (i - 3) * 12;
1452
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1453
        stl_phys(sm_state + offset + 8, dt->base);
1454
        stl_phys(sm_state + offset + 4, dt->limit);
1455
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1456
    }
1457
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1458

    
1459
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1460
    stl_phys(sm_state + 0x7ef8, env->smbase);
1461
#endif
1462
    /* init SMM cpu state */
1463

    
1464
#ifdef TARGET_X86_64
1465
    cpu_load_efer(env, 0);
1466
#endif
1467
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1468
    env->eip = 0x00008000;
1469
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1470
                           0xffffffff, 0);
1471
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1472
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1473
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1474
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1475
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1476

    
1477
    cpu_x86_update_cr0(env,
1478
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1479
    cpu_x86_update_cr4(env, 0);
1480
    env->dr[7] = 0x00000400;
1481
    CC_OP = CC_OP_EFLAGS;
1482
}
1483

    
1484
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

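/* The helpers below implement DIV/IDIV for the 8, 16 and 32 bit
   operand sizes.  As on real hardware, a divide error (#DE) is
   raised both for a zero divisor and for a quotient that does not
   fit in the destination (e.g. DIV BL with AX = 0x1234 and BL = 1). */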
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
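/* AAM: split AL into two unpacked BCD digits, AH = AL / base and
   AL = AL % base (base is the immediate operand, normally 10).
   The XXX above presumably refers to base == 0, which ought to
   raise #DE; as written it is a host division by zero. */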
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

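/* DAA/DAS: decimal adjust AL after an addition/subtraction of packed
   BCD operands.  Example for DAA: after 0x19 + 0x28 = 0x41 (AF set),
   the low-nibble adjustment of +6 yields the correct BCD sum 0x47. */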
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

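/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match store ECX:EBX and set ZF, otherwise load the old value into
   EDX:EAX and clear ZF.  The unconditional store in the mismatch
   path matches real hardware, which always performs a write cycle.
   CMPXCHG16B additionally requires a 16-byte aligned operand. */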
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

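/* ENTER with a non-zero nesting level: copy level-1 saved frame
   pointers from the old frame onto the new stack, then push the new
   frame pointer t1.  The surrounding translated code is assumed to
   push the old EBP and to reserve the local storage itself. */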
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

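/* LLDT/LTR: load the LDT register / task register from a selector
   referencing a descriptor in the GDT.  In long mode the system
   descriptors are 16 bytes, so entry_limit is 15 instead of 7 and
   the upper 32 base bits come from the third descriptor word.
   LTR additionally marks the loaded TSS descriptor busy. */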
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

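/* Segment register loads (MOV seg/POP seg): a null selector is
   accepted (except for SS outside 64-bit CPL < 3); otherwise the
   descriptor type and privilege are checked (SS must be a writable
   data segment at CPL, others must be readable), and the descriptor
   is committed to the segment cache with the accessed bit set. */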
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
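/* The target of a far JMP may be a code segment (conforming or
   not), a call gate, or a TSS/task gate.  For a call gate the CPL
   is preserved and only the CS:EIP from the gate is used; no stack
   switch takes place. */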
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
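/* Besides the direct code-segment case, a far CALL through a call
   gate to a more privileged non-conforming segment switches to the
   inner stack taken from the TSS, copies param_count parameters
   from the old stack, and pushes the old SS:ESP and CS:EIP there. */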
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
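/* Common code for far RET and IRET in protected mode.  It handles
   the same-privilege return, the return to an outer privilege
   level (which also pops SS:ESP and re-validates the data segment
   registers), and, for IRET, the return to vm86 mode. */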
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

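/* SYSENTER/SYSEXIT: fast system call entry and exit.  On SYSENTER,
   CS comes from the SYSENTER_CS MSR (SS is SYSENTER_CS + 8) and
   ESP/EIP from the SYSENTER_ESP/EIP MSRs.  SYSEXIT returns to CPL 3
   with CS at SYSENTER_CS + 16 (+ 32 for a 64-bit return) and takes
   the new ESP/EIP from ECX/EDX. */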
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

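/* Control and debug register access.  In user mode emulation these
   are stubs; in system mode CR8 is routed to the APIC TPR and DR
   writes keep the hardware breakpoint state in sync. */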
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

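/* RDTSC: read the time stamp counter into EDX:EAX.  When CR4.TSD is
   set the instruction is privileged and faults outside ring 0; the
   returned value includes the SVM tsc_offset bias. */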
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

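/* WRMSR/RDMSR: the MSR index is taken from ECX and the 64-bit value
   is passed in or returned through EDX:EAX.  Only the MSRs the
   emulation knows about are handled; unknown indices are silently
   ignored (see the XXX notes) instead of raising #GP. */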
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
#ifdef USE_KQEMU
    case MSR_QPI_COMMBASE:
        if (env->kqemu_enabled) {
            val = kqemu_comm_base;
        } else {
            val = 0;
        }
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

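/* LSL, LAR, VERR and VERW probe a descriptor without loading it:
   instead of faulting, they report the outcome through ZF.  LSL
   returns the segment limit and LAR the access rights (masked to
   0x00f0ff00); on any check failure ZF is cleared and 0 returned. */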
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
3301
{
3302
    env->fpus |= mask;
3303
    if (env->fpus & (~env->fpuc & FPUC_EM))
3304
        env->fpus |= FPUS_SE | FPUS_B;
3305
}
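
/* FPUC_EM holds the exception-mask bits of the control word, so
   (~env->fpuc & FPUC_EM) selects the *unmasked* exceptions; only when
   one of those is pending do the summary (SE) and busy (B) bits get
   set in the status word. */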

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}
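
/* On 16-bit overflow the helper above stores -32768 (0x8000), the
   x87 "integer indefinite" encoding that hardware produces when the
   rounded result does not fit the word destination. */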

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
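
/* The top-of-stack pointer lives in bits 11-13 of the status word:
   fnstsw masks out the stale TOP field (~0x3800) and splices in the
   current env->fpstt. */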

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
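
/* The second switch decodes the precision-control field (control
   word bits 8-9): PC=00 selects single precision, PC=10 double and
   PC=11 extended (the 32/64/80 values name the overall format width
   softfloat expects); the reserved PC=01 encoding falls through to
   extended here. */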

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
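
/* Packed-BCD layout used above: bytes 0-8 hold 18 decimal digits,
   two per byte with the least significant pair first, and bit 7 of
   byte 9 is the sign.  E.g. +1234 is stored as
   34 12 00 00 00 00 00 00 00 00. */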

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);       /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
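
/* When the exponent difference is 53 or more, fprem/fprem1 above
   only perform a partial reduction and set C2; callers are expected
   to re-execute the instruction until C2 clears, mirroring how a
   FPREM loop behaves on hardware. */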

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
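
/* The FPU environment block written above is 14 bytes in 16-bit mode
   and 28 bytes in 32-bit mode; helper_fsave below relies on that when
   it skips (14 << data32) bytes before storing the registers. */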

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
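
/* FXSAVE/FXRSTOR area layout as implemented above: control, status
   and tag words at offset 0, the eight ST registers at +0x20 with a
   16-byte stride, and the XMM registers at +0xa0.  The abridged
   8-bit tag is stored inverted (fptag ^ 0xff) because QEMU's
   fptags[] uses 1 for "empty" while FXSAVE uses 1 for "valid". */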

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
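
/* div64 is a textbook restoring shift-subtract divider: each of the
   64 iterations shifts one dividend bit into the partial remainder
   (a1), subtracts b when it fits, and shifts the resulting quotient
   bit into a0.  After the loop a0 holds the quotient and a1 the
   remainder, which helper_divq_EAX below writes back for the
   RDX:RAX form of DIV. */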

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}
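
/* For IMUL the CF/OF condition is "the high half is not just the
   sign extension of the low half", hence the comparison of r1
   against r0 >> 63 above; any nonzero CC_SRC makes the flag
   computation report overflow. */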

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
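
/* The VMCB stores segment attributes in AMD's compressed 12-bit
   format; the shift pairs above translate between that layout and
   the descriptor-flag layout QEMU uses internally (bits 8-15 and
   20-23), so a save followed by a load round-trips the attributes. */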
4754

    
4755
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4756
                                      CPUState *env, int seg_reg)
4757
{
4758
    SegmentCache sc1, *sc = &sc1;
4759
    svm_load_seg(addr, sc);
4760
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4761
                           sc->base, sc->limit, sc->flags);
4762
}
4763

    
4764
void helper_vmrun(int aflag, int next_eip_addend)
4765
{
4766
    target_ulong addr;
4767
    uint32_t event_inj;
4768
    uint32_t int_ctl;
4769

    
4770
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4771

    
4772
    if (aflag == 2)
4773
        addr = EAX;
4774
    else
4775
        addr = (uint32_t)EAX;
4776

    
4777
    if (loglevel & CPU_LOG_TB_IN_ASM)
4778
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4779

    
4780
    env->vm_vmcb = addr;
4781

    
4782
    /* save the current CPU state in the hsave page */
4783
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4784
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4785

    
4786
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4787
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4788

    
4789
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4790
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4791
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4792
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4793
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4794
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4795

    
4796
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4797
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4798

    
4799
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4800
                  &env->segs[R_ES]);
4801
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4802
                 &env->segs[R_CS]);
4803
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4804
                 &env->segs[R_SS]);
4805
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4806
                 &env->segs[R_DS]);
4807

    
4808
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4809
             EIP + next_eip_addend);
4810
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4811
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4812

    
4813
    /* load the interception bitmaps so we do not need to access the
4814
       vmcb in svm mode */
4815
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4816
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4817
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4818
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4819
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4820
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4821

    
4822
    /* enable intercepts */
4823
    env->hflags |= HF_SVMI_MASK;
4824

    
4825
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4826

    
4827
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4828
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4829

    
4830
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4831
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4832

    
4833
    /* clear exit_info_2 so we behave like the real hardware */
4834
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4835

    
4836
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4837
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4838
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4839
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4840
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4841
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4842
    if (int_ctl & V_INTR_MASKING_MASK) {
4843
        env->v_tpr = int_ctl & V_TPR_MASK;
4844
        env->hflags2 |= HF2_VINTR_MASK;
4845
        if (env->eflags & IF_MASK)
4846
            env->hflags2 |= HF2_HIF_MASK;
4847
    }
4848

    
4849
    cpu_load_efer(env, 
4850
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4851
    env->eflags = 0;
4852
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4853
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4854
    CC_OP = CC_OP_EFLAGS;
4855

    
4856
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4857
                       env, R_ES);
4858
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4859
                       env, R_CS);
4860
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4861
                       env, R_SS);
4862
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4863
                       env, R_DS);
4864

    
4865
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4866
    env->eip = EIP;
4867
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4868
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4869
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4870
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4871
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4872

    
4873
    /* FIXME: guest state consistency checks */
4874

    
4875
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4876
        case TLB_CONTROL_DO_NOTHING:
4877
            break;
4878
        case TLB_CONTROL_FLUSH_ALL_ASID:
4879
            /* FIXME: this is not 100% correct but should work for now */
4880
            tlb_flush(env, 1);
4881
        break;
4882
    }
4883

    
4884
    env->hflags2 |= HF2_GIF_MASK;
4885

    
4886
    if (int_ctl & V_IRQ_MASK) {
4887
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4888
    }
4889

    
4890
    /* maybe we need to inject an event */
4891
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4892
    if (event_inj & SVM_EVTINJ_VALID) {
4893
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4894
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4895
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4896
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4897

    
4898
        if (loglevel & CPU_LOG_TB_IN_ASM)
4899
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
4900
        /* FIXME: need to implement valid_err */
4901
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4902
        case SVM_EVTINJ_TYPE_INTR:
4903
                env->exception_index = vector;
4904
                env->error_code = event_inj_err;
4905
                env->exception_is_int = 0;
4906
                env->exception_next_eip = -1;
4907
                if (loglevel & CPU_LOG_TB_IN_ASM)
4908
                    fprintf(logfile, "INTR");
4909
                /* XXX: is it always correct ? */
4910
                do_interrupt(vector, 0, 0, 0, 1);
4911
                break;
4912
        case SVM_EVTINJ_TYPE_NMI:
4913
                env->exception_index = EXCP02_NMI;
4914
                env->error_code = event_inj_err;
4915
                env->exception_is_int = 0;
4916
                env->exception_next_eip = EIP;
4917
                if (loglevel & CPU_LOG_TB_IN_ASM)
4918
                    fprintf(logfile, "NMI");
4919
                cpu_loop_exit();
4920
                break;
4921
        case SVM_EVTINJ_TYPE_EXEPT:
4922
                env->exception_index = vector;
4923
                env->error_code = event_inj_err;
4924
                env->exception_is_int = 0;
4925
                env->exception_next_eip = -1;
4926
                if (loglevel & CPU_LOG_TB_IN_ASM)
4927
                    fprintf(logfile, "EXEPT");
4928
                cpu_loop_exit();
4929
                break;
4930
        case SVM_EVTINJ_TYPE_SOFT:
4931
                env->exception_index = vector;
4932
                env->error_code = event_inj_err;
4933
                env->exception_is_int = 1;
4934
                env->exception_next_eip = EIP;
4935
                if (loglevel & CPU_LOG_TB_IN_ASM)
4936
                    fprintf(logfile, "SOFT");
4937
                cpu_loop_exit();
4938
                break;
4939
        }
4940
        if (loglevel & CPU_LOG_TB_IN_ASM)
4941
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4942
    }
4943
}
4944

    
4945
void helper_vmmcall(void)
4946
{
4947
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4948
    raise_exception(EXCP06_ILLOP);
4949
}
4950

    
4951
void helper_vmload(int aflag)
4952
{
4953
    target_ulong addr;
4954
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4955

    
4956
    if (aflag == 2)
4957
        addr = EAX;
4958
    else
4959
        addr = (uint32_t)EAX;
4960

    
4961
    if (loglevel & CPU_LOG_TB_IN_ASM)
4962
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4963
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4964
                env->segs[R_FS].base);
4965

    
4966
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4967
                       env, R_FS);
4968
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4969
                       env, R_GS);
4970
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4971
                 &env->tr);
4972
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4973
                 &env->ldt);
4974

    
4975
#ifdef TARGET_X86_64
4976
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4977
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4978
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4979
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4980
#endif
4981
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4982
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4983
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4984
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4985
}
4986

    
4987
void helper_vmsave(int aflag)
4988
{
4989
    target_ulong addr;
4990
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
4991

    
4992
    if (aflag == 2)
4993
        addr = EAX;
4994
    else
4995
        addr = (uint32_t)EAX;
4996

    
4997
    if (loglevel & CPU_LOG_TB_IN_ASM)
4998
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4999
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5000
                env->segs[R_FS].base);
5001

    
5002
    svm_save_seg(addr + offsetof(struct vmcb, save.fs), 
5003
                 &env->segs[R_FS]);
5004
    svm_save_seg(addr + offsetof(struct vmcb, save.gs), 
5005
                 &env->segs[R_GS]);
5006
    svm_save_seg(addr + offsetof(struct vmcb, save.tr), 
5007
                 &env->tr);
5008
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), 
5009
                 &env->ldt);
5010

    
5011
#ifdef TARGET_X86_64
5012
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5013
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5014
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5015
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5016
#endif
5017
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5018
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5019
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5020
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5021
}
5022

    
5023
void helper_stgi(void)
5024
{
5025
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5026
    env->hflags2 |= HF2_GIF_MASK;
5027
}
5028

    
5029
void helper_clgi(void)
5030
{
5031
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5032
    env->hflags2 &= ~HF2_GIF_MASK;
5033
}
5034

    
5035
void helper_skinit(void)
5036
{
5037
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5038
    /* XXX: not implemented */
5039
    raise_exception(EXCP06_ILLOP);
5040
}
5041

    
5042
void helper_invlpga(int aflag)
5043
{
5044
    target_ulong addr;
5045
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5046
    
5047
    if (aflag == 2)
5048
        addr = EAX;
5049
    else
5050
        addr = (uint32_t)EAX;
5051

    
5052
    /* XXX: could use the ASID to see if it is needed to do the
5053
       flush */
5054
    tlb_flush_page(env, addr);
5055
}
5056

    
5057
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* The MSR permission bitmap uses two bits per MSR (read and
               write intercept) and covers three 8K-MSR ranges.  Compute
               the byte offset (t1) and the bit offset within that byte
               (t0) of the entry for the MSR in ECX. */
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                /* not reached (helper_vmexit does not return); the
                   assignments silence compiler warnings */
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param is 0 for a read, 1 for a write, selecting the
               second of the two bits for writes */
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

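/* Check an I/O access against the I/O permission bitmap (one intercept
   bit per port; a multi-byte access is intercepted if the bit of any
   byte it touches is set). */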
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        /* bits 4..6 of param encode the access size in bytes, so mask
           gets one bit per byte accessed */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

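/* #VMEXIT: write the guest state back into the VMCB, clear the
   intercept/control state, reload the host state from vm_hsave and
   resume execution in the host. */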
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* save the interrupt shadow (STI/MOV SS inhibit state) into the VMCB */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* merge the virtual TPR and any pending virtual IRQ back into the
       guest's int_ctl field */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    /* MMX state aliases the x87 stack: reset the top-of-stack pointer
       and mark all eight registers as valid (fptags[i] == 0) */
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state (fptags[i] == 1 means register i is empty) */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

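/* ops_sse.h is instantiated twice: SHIFT 0 generates the 64-bit MMX
   variants of the vector helpers and SHIFT 1 the 128-bit SSE variants.
   helper_template.h is likewise instantiated once per operand size
   (byte/word/long, plus quad on x86-64). */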
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
/* Note: t0 is assumed to be non-zero here (callers are expected to
   handle the BSF/BSR zero-source case themselves); otherwise these
   loops would not terminate. */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    /* scan forward for the lowest set bit */
    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    /* scan backward for the highest set bit */
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

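/* Lazy condition code evaluation: the translator records the last
   flag-setting operation in CC_OP and its operands/result in
   CC_SRC/CC_DST; the helpers below reconstruct EFLAGS (or just the
   carry) on demand.  For CC_OP_EFLAGS the flags have already been
   computed and stored in CC_SRC. */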
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    /* where the carry can be derived from CC_SRC in a width-independent
       way, all operand sizes share one implementation */
    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    /* INC/DEC do not modify CF, so compute_c_incl just returns the
       carry that was saved in CC_SRC */
    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}