target-i386/op_helper.c @ 99a0949b

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

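/* Parity lookup table: entry i is CC_P when i contains an even number of
   set bits, matching the x86 PF flag, which is set on even parity of the
   low byte of a result. */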
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

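/* x87 constants, as loaded by FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E
   and FLDL2T: zero, one, pi, log10(2), ln(2), log2(e) and log2(10). */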
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static a_spinlock global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

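/* Fetch the privilege-level dpl stack pointer from the current TSS. In a
   32-bit TSS the ESP/SS pair for level n is at offset 4 + n * 8; in a
   16-bit TSS it is at offset 2 + n * 4. Both cases are covered by the
   (dpl * 4 + 2) << shift computation below. */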
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* a code segment must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
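/* Hardware task switch: save the outgoing context into the current TSS,
   then load the full register and segment state from the TSS selected by
   tss_selector. source distinguishes JMP/IRET, which clear the old task's
   busy bit, from CALL, which stores a back link to the old task and sets
   NT in the incoming EFLAGS. */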
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
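/* The I/O permission bitmap starts at the 16-bit offset stored at 0x66 in
   the TSS; one bit per port, and a set bit denies access. Two bytes are
   read so that accesses spanning a byte boundary are checked as well. */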
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check reads two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

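/* Exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF and
   #AC (vectors 8, 10-14 and 17). */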
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* on 64-bit machines, this addition can overflow, so this segment
 * addition macro can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
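/* Stack push/pop through the kernel address space: sp is updated in place
   and masked with sp_mask on each access, so both 16-bit and 32-bit stack
   segments are handled. */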
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

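/* Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2;
   callers pass ist + 3 to reach IST1-IST7, which start at offset 0x24. */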
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
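/* SYSCALL: the new CS selector comes from STAR[47:32] and SS is CS + 8.
   In long mode the return address goes to RCX, the old RFLAGS to R11,
   RFLAGS is masked with the SFMASK MSR and the target is LSTAR (or CSTAR
   when called from compatibility mode); in legacy mode the target is
   STAR[31:0]. */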
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

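/* Save the CPU state into the SMRAM state save area at SMBASE + 0x8000
   and reset to the SMM entry state (CS base = SMBASE, EIP = 0x8000). The
   save map layout differs between the 32-bit and the AMD64 variants,
   hence the two #ifdef branches below. */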
void do_smm_enter(void)
1375
{
1376
    target_ulong sm_state;
1377
    SegmentCache *dt;
1378
    int i, offset;
1379

    
1380
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1381
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1382

    
1383
    env->hflags |= HF_SMM_MASK;
1384
    cpu_smm_update(env);
1385

    
1386
    sm_state = env->smbase + 0x8000;
1387

    
1388
#ifdef TARGET_X86_64
1389
    for(i = 0; i < 6; i++) {
1390
        dt = &env->segs[i];
1391
        offset = 0x7e00 + i * 16;
1392
        stw_phys(sm_state + offset, dt->selector);
1393
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1394
        stl_phys(sm_state + offset + 4, dt->limit);
1395
        stq_phys(sm_state + offset + 8, dt->base);
1396
    }
1397

    
1398
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1399
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1400

    
1401
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1402
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1403
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1404
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1405

    
1406
    stq_phys(sm_state + 0x7e88, env->idt.base);
1407
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1408

    
1409
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1410
    stq_phys(sm_state + 0x7e98, env->tr.base);
1411
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1412
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1413

    
1414
    stq_phys(sm_state + 0x7ed0, env->efer);
1415

    
1416
    stq_phys(sm_state + 0x7ff8, EAX);
1417
    stq_phys(sm_state + 0x7ff0, ECX);
1418
    stq_phys(sm_state + 0x7fe8, EDX);
1419
    stq_phys(sm_state + 0x7fe0, EBX);
1420
    stq_phys(sm_state + 0x7fd8, ESP);
1421
    stq_phys(sm_state + 0x7fd0, EBP);
1422
    stq_phys(sm_state + 0x7fc8, ESI);
1423
    stq_phys(sm_state + 0x7fc0, EDI);
1424
    for(i = 8; i < 16; i++)
1425
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1426
    stq_phys(sm_state + 0x7f78, env->eip);
1427
    stl_phys(sm_state + 0x7f70, compute_eflags());
1428
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1429
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1430

    
1431
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1432
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1433
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1434

    
1435
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1436
    stl_phys(sm_state + 0x7f00, env->smbase);
1437
#else
1438
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1439
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1440
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1441
    stl_phys(sm_state + 0x7ff0, env->eip);
1442
    stl_phys(sm_state + 0x7fec, EDI);
1443
    stl_phys(sm_state + 0x7fe8, ESI);
1444
    stl_phys(sm_state + 0x7fe4, EBP);
1445
    stl_phys(sm_state + 0x7fe0, ESP);
1446
    stl_phys(sm_state + 0x7fdc, EBX);
1447
    stl_phys(sm_state + 0x7fd8, EDX);
1448
    stl_phys(sm_state + 0x7fd4, ECX);
1449
    stl_phys(sm_state + 0x7fd0, EAX);
1450
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1451
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1452

    
1453
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1454
    stl_phys(sm_state + 0x7f64, env->tr.base);
1455
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1456
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1457

    
1458
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1459
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1460
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1461
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1462

    
1463
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1464
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1465

    
1466
    stl_phys(sm_state + 0x7f58, env->idt.base);
1467
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1468

    
1469
    for(i = 0; i < 6; i++) {
1470
        dt = &env->segs[i];
1471
        if (i < 3)
1472
            offset = 0x7f84 + i * 12;
1473
        else
1474
            offset = 0x7f2c + (i - 3) * 12;
1475
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1476
        stl_phys(sm_state + offset + 8, dt->base);
1477
        stl_phys(sm_state + offset + 4, dt->limit);
1478
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1479
    }
1480
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1481

    
1482
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1483
    stl_phys(sm_state + 0x7ef8, env->smbase);
1484
#endif
1485
    /* init SMM cpu state */
1486

    
1487
#ifdef TARGET_X86_64
1488
    cpu_load_efer(env, 0);
1489
#endif
1490
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1491
    env->eip = 0x00008000;
1492
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1493
                           0xffffffff, 0);
1494
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1495
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1496
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1497
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1498
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1499

    
1500
    cpu_x86_update_cr0(env,
1501
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1502
    cpu_x86_update_cr4(env, 0);
1503
    env->dr[7] = 0x00000400;
1504
    CC_OP = CC_OP_EFLAGS;
1505
}
1506
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
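/* e.g. DIV r/m8 with AX = 0x0403 and divisor 0x10: quotient 0x40 goes
   to AL, remainder 0x03 to AH; a zero divisor, or a quotient that does
   not fit in the destination, raises #DE (EXCP00_DIVZ). */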
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
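/* the 32-bit forms below widen EDX:EAX into a 64-bit intermediate
   before dividing, mirroring how the hardware concatenates the
   register pair. */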
void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM with an immediate operand of zero should raise #DE
   (division error); this case is not handled here */
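/* e.g. AAM (base 10) with AL = 75 (0x4b) yields AH = 7, AL = 5;
   AAD reverses it: AH = 7, AL = 5 gives AL = 75, and AH ends up zero
   because only AL is written back into AX. */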
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
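/* e.g. DAA after 0x15 + 0x27 = 0x3c: the low nibble 0xc is > 9, so 6
   is added and AL becomes 0x42, the packed-BCD result of 15 + 27. */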
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
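/* CMPXCHG8B: compare EDX:EAX against the 64-bit value at a0; on a
   match store ECX:EBX and set ZF, otherwise load the old value into
   EDX:EAX and clear ZF.  The store happens on both paths, as the real
   instruction always writes the destination back. */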
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
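/* ENTER frame helper: for a nesting level > 1 the saved frame pointers
   of the enclosing frames are copied down from the old frame, and t1
   (the new frame pointer value) is pushed last. */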
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
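/* LLDT and LTR load a system-segment descriptor from the GDT; in long
   mode these descriptors are 16 bytes wide, hence the larger
   entry_limit below. */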
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
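/* for a call gate into a more-privileged non-conforming segment, the
   protected-mode call below switches to the stack supplied by the TSS
   for the target DPL and copies param_count words (or dwords) of
   arguments from the old stack before pushing the return address. */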
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
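/* the far-return/IRET stack image popped below is, from the top of the
   stack: EIP, CS [, EFLAGS for IRET], and when the privilege level
   changes also ESP and SS; 'shift' selects 16-, 32- or (shift == 2)
   64-bit pops. */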
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
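/* SYSENTER/SYSEXIT build flat segments directly from
   MSR_IA32_SYSENTER_CS: CS comes from the MSR value itself and SS from
   value + 8; SYSEXIT returns through value + 16/+24 (legacy) or
   value + 32/+40 (64-bit, dflag == 2). */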
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
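/* writes to DR0-DR3 and DR7 must drop and re-insert the emulated
   hardware breakpoints so the new addresses and control bits take
   effect. */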
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
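/* WRMSR/RDMSR: ECX selects the MSR and EDX:EAX carries the 64-bit
   value; the SVM MSR intercept is checked with parameter 1 for writes
   and 0 for reads. */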
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
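/* RDMSR mirrors the table above; note that the MTRRphysBase/PhysMask
   MSRs are interleaved, so the variable-range index is
   (ECX - base MSR) / 2. */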

    
3124
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

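/* Note for the four checks below (LSL/LAR/VERR/VERW): on any selector,
   descriptor type or privilege failure they clear ZF via CC_SRC rather
   than faulting; on success they set ZF and return the requested data. */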
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

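/* Note: FBLD/FBST use the x87 packed-BCD format: bytes 0..8 hold 18
   decimal digits, two per byte (low nibble = low digit); bit 7 of
   byte 9 is the sign. */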
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

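/* Note: the 16-bit tag word built below packs two bits per register:
   00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal),
   11 = empty. */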
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

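/* Note: helper_fxsave()/helper_fxrstor() below use the 512-byte FXSAVE
   layout: control/status/tag words first, the eight FP registers at
   offset 0x20 (16 bytes apart), and the XMM registers at offset 0xa0. */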
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

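/* Note: without USE_X86LDOUBLE the 80-bit values are kept as IEEE
   doubles, so cpu_get_fp80()/cpu_set_fp80() below convert between the
   double layout (11-bit exponent, bias 1023, implicit integer bit) and
   the extended layout (15-bit exponent, bias 16383, explicit bit 63). */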
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

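/* Note: when the high part is non-zero, div64() below falls back to a
   64-step restoring shift-subtract division: each iteration shifts one
   dividend bit into the running remainder (a1) and shifts the resulting
   quotient bit into a0. */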
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

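/* Note: the widening multiplies below report overflow through CC_SRC:
   a non-zero high half for MUL, or a high half that differs from the
   sign extension of the low half for IMUL. */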
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

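/* Note: the VMCB stores segment attributes in the packed 12-bit SVM
   format; svm_save_seg()/svm_load_seg() below convert to and from the
   descriptor flag layout used by SegmentCache (flag bits 8..15 and
   20..23). */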
static inline void svm_save_seg(a_target_phys_addr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(a_target_phys_addr addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(a_target_phys_addr addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

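/* Note: helper_vmrun() below follows the hardware VMRUN sequence: save
   host state into the VM_HSAVE_PA area, load the intercept bitmaps and
   guest state from the VMCB addressed by rAX, set GIF, and finally
   inject any event pending in control.event_inj. */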
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
5065
{
5066
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5067
    raise_exception(EXCP06_ILLOP);
5068
}
5069

    
5070
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs), env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs), env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

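/* VMSAVE: the mirror of VMLOAD, storing the same set of state back into
   the VMCB at the physical address in rAX. */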
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs), &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs), &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

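/* STGI/CLGI toggle the global interrupt flag (GIF), tracked here in
   hflags2; while GIF is clear the CPU holds interrupts and NMIs
   pending. */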
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

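/* Check whether the current operation is intercepted: if the
   corresponding bit is set in the VMCB intercept vectors, exit to the
   host via helper_vmexit().  Outside SVM guest mode (HF_SVMI_MASK
   clear) this is a no-op. */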
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
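
            /* The MSR permission bitmap dedicates two bits (read and
               write) to each MSR, in three 2K blocks covering the MSR
               ranges 0-0x1fff, 0xc0000000-0xc0001fff and
               0xc0010000-0xc0011fff; t1 is the byte offset into the
               bitmap and t0 the bit offset within that byte. */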
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

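/* The I/O permission bitmap has one bit per port; a multi-byte access
   must be intercepted if any of the ports it touches is intercepted.
   param bits 6:4 hold the access size in bytes, from which a mask
   covering all touched ports is built. */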
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

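/* #VMEXIT: save the guest state back into the VMCB, record the exit
   code and exit information, reload the host state from vm_hsave and
   resume execution in the host. */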
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es), env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs), env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss), env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds), env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
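/* The MMX registers alias the x87 register stack: using MMX resets the
   stack top and tags all eight registers valid, while EMMS tags them
   all empty again.  The two helpers below model just that. */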
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

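/* ops_sse.h and helper_template.h are each included several times;
   SHIFT selects the variant generated on each pass (for ops_sse.h:
   0 = 64-bit MMX, 1 = 128-bit SSE; for helper_template.h the operand
   width: 0 = byte, 1 = word, 2 = long, 3 = quad). */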
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
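/* Note: the translator is expected to call helper_bsf/helper_bsr only
   with a nonzero source operand (it branches around the helpers when
   the operand is 0, where BSF/BSR leave the destination undefined);
   otherwise the loops below would not terminate. */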
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

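/* Lazy condition codes: CC_OP records which operation last set the
   flags and CC_SRC/CC_DST hold its operands, so the flags are only
   materialized on demand by the dispatchers below.  CC_OP_EFLAGS means
   CC_SRC already contains the computed flags. */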
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

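/* The carry-only variants are shared across widths wherever CF can be
   read width-independently from CC_SRC: INC/DEC preserve CF (saved in
   CC_SRC, hence compute_c_incl for every width), MUL sets it iff
   CC_SRC is nonzero, and SAR keeps it in bit 0 of CC_SRC. */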
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}