Statistics
| Branch: | Revision:

root / target-i386 / op_helper.c @ 4581cbcd

History | View | Annotate | Download (159.3 kB)

1
/*
2
 *  i386 helpers
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "exec.h"
21
#include "exec-all.h"
22
#include "host-utils.h"
23
#include "ioport.h"
24

    
25
//#define DEBUG_PCALL
26

    
27

    
28
#ifdef DEBUG_PCALL
29
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30
#  define LOG_PCALL_STATE(env) \
31
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32
#else
33
#  define LOG_PCALL(...) do { } while (0)
34
#  define LOG_PCALL_STATE(env) do { } while (0)
35
#endif
36

    
37

    
38
#if 0
39
#define raise_exception_err(a, b)\
40
do {\
41
    qemu_log("raise_exception line=%d\n", __LINE__);\
42
    (raise_exception_err)(a, b);\
43
} while (0)
44
#endif
45

    
46
static const uint8_t parity_table[256] = {
47
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
79
};
80

    
81
/* modulo 17 table */
82
static const uint8_t rclw_table[32] = {
83
    0, 1, 2, 3, 4, 5, 6, 7,
84
    8, 9,10,11,12,13,14,15,
85
   16, 0, 1, 2, 3, 4, 5, 6,
86
    7, 8, 9,10,11,12,13,14,
87
};
88

    
89
/* modulo 9 table */
90
static const uint8_t rclb_table[32] = {
91
    0, 1, 2, 3, 4, 5, 6, 7,
92
    8, 0, 1, 2, 3, 4, 5, 6,
93
    7, 8, 0, 1, 2, 3, 4, 5,
94
    6, 7, 8, 0, 1, 2, 3, 4,
95
};
96

    
97
static const CPU86_LDouble f15rk[7] =
98
{
99
    0.00000000000000000000L,
100
    1.00000000000000000000L,
101
    3.14159265358979323851L,  /*pi*/
102
    0.30102999566398119523L,  /*lg2*/
103
    0.69314718055994530943L,  /*ln2*/
104
    1.44269504088896340739L,  /*l2e*/
105
    3.32192809488736234781L,  /*l2t*/
106
};
107

    
108
/* broken thread support */
109

    
110
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
111

    
112
void helper_lock(void)
113
{
114
    spin_lock(&global_cpu_lock);
115
}
116

    
117
void helper_unlock(void)
118
{
119
    spin_unlock(&global_cpu_lock);
120
}
121

    
122
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
123
{
124
    load_eflags(t0, update_mask);
125
}
126

    
127
target_ulong helper_read_eflags(void)
128
{
129
    uint32_t eflags;
130
    eflags = helper_cc_compute_all(CC_OP);
131
    eflags |= (DF & DF_MASK);
132
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
133
    return eflags;
134
}
135

    
136
/* return non zero if error */
137
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
138
                               int selector)
139
{
140
    SegmentCache *dt;
141
    int index;
142
    target_ulong ptr;
143

    
144
    if (selector & 0x4)
145
        dt = &env->ldt;
146
    else
147
        dt = &env->gdt;
148
    index = selector & ~7;
149
    if ((index + 7) > dt->limit)
150
        return -1;
151
    ptr = dt->base + index;
152
    *e1_ptr = ldl_kernel(ptr);
153
    *e2_ptr = ldl_kernel(ptr + 4);
154
    return 0;
155
}
156

    
157
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
158
{
159
    unsigned int limit;
160
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161
    if (e2 & DESC_G_MASK)
162
        limit = (limit << 12) | 0xfff;
163
    return limit;
164
}
165

    
166
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
167
{
168
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
169
}
170

    
171
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
172
{
173
    sc->base = get_seg_base(e1, e2);
174
    sc->limit = get_seg_limit(e1, e2);
175
    sc->flags = e2;
176
}
177

    
178
/* init the segment cache in vm86 mode. */
179
static inline void load_seg_vm(int seg, int selector)
180
{
181
    selector &= 0xffff;
182
    cpu_x86_load_seg_cache(env, seg, selector,
183
                           (selector << 4), 0xffff, 0);
184
}
185

    
186
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
187
                                       uint32_t *esp_ptr, int dpl)
188
{
189
    int type, index, shift;
190

    
191
#if 0
192
    {
193
        int i;
194
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
195
        for(i=0;i<env->tr.limit;i++) {
196
            printf("%02x ", env->tr.base[i]);
197
            if ((i & 7) == 7) printf("\n");
198
        }
199
        printf("\n");
200
    }
201
#endif
202

    
203
    if (!(env->tr.flags & DESC_P_MASK))
204
        cpu_abort(env, "invalid tss");
205
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
206
    if ((type & 7) != 1)
207
        cpu_abort(env, "invalid tss type");
208
    shift = type >> 3;
209
    index = (dpl * 4 + 2) << shift;
210
    if (index + (4 << shift) - 1 > env->tr.limit)
211
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
212
    if (shift == 0) {
213
        *esp_ptr = lduw_kernel(env->tr.base + index);
214
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
215
    } else {
216
        *esp_ptr = ldl_kernel(env->tr.base + index);
217
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
218
    }
219
}
220

    
221
/* XXX: merge with load_seg() */
222
static void tss_load_seg(int seg_reg, int selector)
223
{
224
    uint32_t e1, e2;
225
    int rpl, dpl, cpl;
226

    
227
    if ((selector & 0xfffc) != 0) {
228
        if (load_segment(&e1, &e2, selector) != 0)
229
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230
        if (!(e2 & DESC_S_MASK))
231
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232
        rpl = selector & 3;
233
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
234
        cpl = env->hflags & HF_CPL_MASK;
235
        if (seg_reg == R_CS) {
236
            if (!(e2 & DESC_CS_MASK))
237
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238
            /* XXX: is it correct ? */
239
            if (dpl != rpl)
240
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241
            if ((e2 & DESC_C_MASK) && dpl > rpl)
242
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243
        } else if (seg_reg == R_SS) {
244
            /* SS must be writable data */
245
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
246
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247
            if (dpl != cpl || dpl != rpl)
248
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
249
        } else {
250
            /* not readable code */
251
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
252
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253
            /* if data or non conforming code, checks the rights */
254
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
255
                if (dpl < cpl || dpl < rpl)
256
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
257
            }
258
        }
259
        if (!(e2 & DESC_P_MASK))
260
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
261
        cpu_x86_load_seg_cache(env, seg_reg, selector,
262
                       get_seg_base(e1, e2),
263
                       get_seg_limit(e1, e2),
264
                       e2);
265
    } else {
266
        if (seg_reg == R_SS || seg_reg == R_CS)
267
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
268
    }
269
}
270

    
271
#define SWITCH_TSS_JMP  0
272
#define SWITCH_TSS_IRET 1
273
#define SWITCH_TSS_CALL 2
274

    
275
/* XXX: restore CPU state in registers (PowerPC case) */
276
static void switch_tss(int tss_selector,
277
                       uint32_t e1, uint32_t e2, int source,
278
                       uint32_t next_eip)
279
{
280
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281
    target_ulong tss_base;
282
    uint32_t new_regs[8], new_segs[6];
283
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284
    uint32_t old_eflags, eflags_mask;
285
    SegmentCache *dt;
286
    int index;
287
    target_ulong ptr;
288

    
289
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
291

    
292
    /* if task gate, we read the TSS segment and we load it */
293
    if (type == 5) {
294
        if (!(e2 & DESC_P_MASK))
295
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296
        tss_selector = e1 >> 16;
297
        if (tss_selector & 4)
298
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299
        if (load_segment(&e1, &e2, tss_selector) != 0)
300
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301
        if (e2 & DESC_S_MASK)
302
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
304
        if ((type & 7) != 1)
305
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306
    }
307

    
308
    if (!(e2 & DESC_P_MASK))
309
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
310

    
311
    if (type & 8)
312
        tss_limit_max = 103;
313
    else
314
        tss_limit_max = 43;
315
    tss_limit = get_seg_limit(e1, e2);
316
    tss_base = get_seg_base(e1, e2);
317
    if ((tss_selector & 4) != 0 ||
318
        tss_limit < tss_limit_max)
319
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
321
    if (old_type & 8)
322
        old_tss_limit_max = 103;
323
    else
324
        old_tss_limit_max = 43;
325

    
326
    /* read all the registers from the new TSS */
327
    if (type & 8) {
328
        /* 32 bit */
329
        new_cr3 = ldl_kernel(tss_base + 0x1c);
330
        new_eip = ldl_kernel(tss_base + 0x20);
331
        new_eflags = ldl_kernel(tss_base + 0x24);
332
        for(i = 0; i < 8; i++)
333
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334
        for(i = 0; i < 6; i++)
335
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336
        new_ldt = lduw_kernel(tss_base + 0x60);
337
        new_trap = ldl_kernel(tss_base + 0x64);
338
    } else {
339
        /* 16 bit */
340
        new_cr3 = 0;
341
        new_eip = lduw_kernel(tss_base + 0x0e);
342
        new_eflags = lduw_kernel(tss_base + 0x10);
343
        for(i = 0; i < 8; i++)
344
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345
        for(i = 0; i < 4; i++)
346
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347
        new_ldt = lduw_kernel(tss_base + 0x2a);
348
        new_segs[R_FS] = 0;
349
        new_segs[R_GS] = 0;
350
        new_trap = 0;
351
    }
352
    /* XXX: avoid a compiler warning, see
353
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
354
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
355
    (void)new_trap;
356

    
357
    /* NOTE: we must avoid memory exceptions during the task switch,
358
       so we make dummy accesses before */
359
    /* XXX: it can still fail in some cases, so a bigger hack is
360
       necessary to valid the TLB after having done the accesses */
361

    
362
    v1 = ldub_kernel(env->tr.base);
363
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
364
    stb_kernel(env->tr.base, v1);
365
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
366

    
367
    /* clear busy bit (it is restartable) */
368
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
369
        target_ulong ptr;
370
        uint32_t e2;
371
        ptr = env->gdt.base + (env->tr.selector & ~7);
372
        e2 = ldl_kernel(ptr + 4);
373
        e2 &= ~DESC_TSS_BUSY_MASK;
374
        stl_kernel(ptr + 4, e2);
375
    }
376
    old_eflags = compute_eflags();
377
    if (source == SWITCH_TSS_IRET)
378
        old_eflags &= ~NT_MASK;
379

    
380
    /* save the current state in the old TSS */
381
    if (type & 8) {
382
        /* 32 bit */
383
        stl_kernel(env->tr.base + 0x20, next_eip);
384
        stl_kernel(env->tr.base + 0x24, old_eflags);
385
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
386
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
387
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
388
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
389
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
390
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
391
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
392
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
393
        for(i = 0; i < 6; i++)
394
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
395
    } else {
396
        /* 16 bit */
397
        stw_kernel(env->tr.base + 0x0e, next_eip);
398
        stw_kernel(env->tr.base + 0x10, old_eflags);
399
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
400
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
401
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
402
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
403
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
404
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
405
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
406
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
407
        for(i = 0; i < 4; i++)
408
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
409
    }
410

    
411
    /* now if an exception occurs, it will occurs in the next task
412
       context */
413

    
414
    if (source == SWITCH_TSS_CALL) {
415
        stw_kernel(tss_base, env->tr.selector);
416
        new_eflags |= NT_MASK;
417
    }
418

    
419
    /* set busy bit */
420
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
421
        target_ulong ptr;
422
        uint32_t e2;
423
        ptr = env->gdt.base + (tss_selector & ~7);
424
        e2 = ldl_kernel(ptr + 4);
425
        e2 |= DESC_TSS_BUSY_MASK;
426
        stl_kernel(ptr + 4, e2);
427
    }
428

    
429
    /* set the new CPU state */
430
    /* from this point, any exception which occurs can give problems */
431
    env->cr[0] |= CR0_TS_MASK;
432
    env->hflags |= HF_TS_MASK;
433
    env->tr.selector = tss_selector;
434
    env->tr.base = tss_base;
435
    env->tr.limit = tss_limit;
436
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
437

    
438
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
439
        cpu_x86_update_cr3(env, new_cr3);
440
    }
441

    
442
    /* load all registers without an exception, then reload them with
443
       possible exception */
444
    env->eip = new_eip;
445
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
446
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
447
    if (!(type & 8))
448
        eflags_mask &= 0xffff;
449
    load_eflags(new_eflags, eflags_mask);
450
    /* XXX: what to do in 16 bit case ? */
451
    EAX = new_regs[0];
452
    ECX = new_regs[1];
453
    EDX = new_regs[2];
454
    EBX = new_regs[3];
455
    ESP = new_regs[4];
456
    EBP = new_regs[5];
457
    ESI = new_regs[6];
458
    EDI = new_regs[7];
459
    if (new_eflags & VM_MASK) {
460
        for(i = 0; i < 6; i++)
461
            load_seg_vm(i, new_segs[i]);
462
        /* in vm86, CPL is always 3 */
463
        cpu_x86_set_cpl(env, 3);
464
    } else {
465
        /* CPL is set the RPL of CS */
466
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
467
        /* first just selectors as the rest may trigger exceptions */
468
        for(i = 0; i < 6; i++)
469
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
470
    }
471

    
472
    env->ldt.selector = new_ldt & ~4;
473
    env->ldt.base = 0;
474
    env->ldt.limit = 0;
475
    env->ldt.flags = 0;
476

    
477
    /* load the LDT */
478
    if (new_ldt & 4)
479
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
480

    
481
    if ((new_ldt & 0xfffc) != 0) {
482
        dt = &env->gdt;
483
        index = new_ldt & ~7;
484
        if ((index + 7) > dt->limit)
485
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
486
        ptr = dt->base + index;
487
        e1 = ldl_kernel(ptr);
488
        e2 = ldl_kernel(ptr + 4);
489
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
490
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
491
        if (!(e2 & DESC_P_MASK))
492
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
493
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
494
    }
495

    
496
    /* load the segments */
497
    if (!(new_eflags & VM_MASK)) {
498
        tss_load_seg(R_CS, new_segs[R_CS]);
499
        tss_load_seg(R_SS, new_segs[R_SS]);
500
        tss_load_seg(R_ES, new_segs[R_ES]);
501
        tss_load_seg(R_DS, new_segs[R_DS]);
502
        tss_load_seg(R_FS, new_segs[R_FS]);
503
        tss_load_seg(R_GS, new_segs[R_GS]);
504
    }
505

    
506
    /* check that EIP is in the CS segment limits */
507
    if (new_eip > env->segs[R_CS].limit) {
508
        /* XXX: different exception if CALL ? */
509
        raise_exception_err(EXCP0D_GPF, 0);
510
    }
511

    
512
#ifndef CONFIG_USER_ONLY
513
    /* reset local breakpoints */
514
    if (env->dr[7] & 0x55) {
515
        for (i = 0; i < 4; i++) {
516
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
517
                hw_breakpoint_remove(env, i);
518
        }
519
        env->dr[7] &= ~0x55;
520
    }
521
#endif
522
}
523

    
524
/* check if Port I/O is allowed in TSS */
525
static inline void check_io(int addr, int size)
526
{
527
    int io_offset, val, mask;
528

    
529
    /* TSS must be a valid 32 bit one */
530
    if (!(env->tr.flags & DESC_P_MASK) ||
531
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
532
        env->tr.limit < 103)
533
        goto fail;
534
    io_offset = lduw_kernel(env->tr.base + 0x66);
535
    io_offset += (addr >> 3);
536
    /* Note: the check needs two bytes */
537
    if ((io_offset + 1) > env->tr.limit)
538
        goto fail;
539
    val = lduw_kernel(env->tr.base + io_offset);
540
    val >>= (addr & 7);
541
    mask = (1 << size) - 1;
542
    /* all bits must be zero to allow the I/O */
543
    if ((val & mask) != 0) {
544
    fail:
545
        raise_exception_err(EXCP0D_GPF, 0);
546
    }
547
}
548

    
549
void helper_check_iob(uint32_t t0)
550
{
551
    check_io(t0, 1);
552
}
553

    
554
void helper_check_iow(uint32_t t0)
555
{
556
    check_io(t0, 2);
557
}
558

    
559
void helper_check_iol(uint32_t t0)
560
{
561
    check_io(t0, 4);
562
}
563

    
564
void helper_outb(uint32_t port, uint32_t data)
565
{
566
    cpu_outb(port, data & 0xff);
567
}
568

    
569
target_ulong helper_inb(uint32_t port)
570
{
571
    return cpu_inb(port);
572
}
573

    
574
void helper_outw(uint32_t port, uint32_t data)
575
{
576
    cpu_outw(port, data & 0xffff);
577
}
578

    
579
target_ulong helper_inw(uint32_t port)
580
{
581
    return cpu_inw(port);
582
}
583

    
584
void helper_outl(uint32_t port, uint32_t data)
585
{
586
    cpu_outl(port, data);
587
}
588

    
589
target_ulong helper_inl(uint32_t port)
590
{
591
    return cpu_inl(port);
592
}
593

    
594
static inline unsigned int get_sp_mask(unsigned int e2)
595
{
596
    if (e2 & DESC_B_MASK)
597
        return 0xffffffff;
598
    else
599
        return 0xffff;
600
}
601

    
602
static int exeption_has_error_code(int intno)
603
{
604
        switch(intno) {
605
        case 8:
606
        case 10:
607
        case 11:
608
        case 12:
609
        case 13:
610
        case 14:
611
        case 17:
612
            return 1;
613
        }
614
        return 0;
615
}
616

    
617
#ifdef TARGET_X86_64
618
#define SET_ESP(val, sp_mask)\
619
do {\
620
    if ((sp_mask) == 0xffff)\
621
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
622
    else if ((sp_mask) == 0xffffffffLL)\
623
        ESP = (uint32_t)(val);\
624
    else\
625
        ESP = (val);\
626
} while (0)
627
#else
628
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
629
#endif
630

    
631
/* in 64-bit machines, this can overflow. So this segment addition macro
632
 * can be used to trim the value to 32-bit whenever needed */
633
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
634

    
635
/* XXX: add a is_user flag to have proper security support */
636
#define PUSHW(ssp, sp, sp_mask, val)\
637
{\
638
    sp -= 2;\
639
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
640
}
641

    
642
#define PUSHL(ssp, sp, sp_mask, val)\
643
{\
644
    sp -= 4;\
645
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
646
}
647

    
648
#define POPW(ssp, sp, sp_mask, val)\
649
{\
650
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
651
    sp += 2;\
652
}
653

    
654
#define POPL(ssp, sp, sp_mask, val)\
655
{\
656
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
657
    sp += 4;\
658
}
659

    
660
/* protected mode interrupt */
661
static void do_interrupt_protected(int intno, int is_int, int error_code,
662
                                   unsigned int next_eip, int is_hw)
663
{
664
    SegmentCache *dt;
665
    target_ulong ptr, ssp;
666
    int type, dpl, selector, ss_dpl, cpl;
667
    int has_error_code, new_stack, shift;
668
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
669
    uint32_t old_eip, sp_mask;
670

    
671
    has_error_code = 0;
672
    if (!is_int && !is_hw)
673
        has_error_code = exeption_has_error_code(intno);
674
    if (is_int)
675
        old_eip = next_eip;
676
    else
677
        old_eip = env->eip;
678

    
679
    dt = &env->idt;
680
    if (intno * 8 + 7 > dt->limit)
681
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
682
    ptr = dt->base + intno * 8;
683
    e1 = ldl_kernel(ptr);
684
    e2 = ldl_kernel(ptr + 4);
685
    /* check gate type */
686
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
687
    switch(type) {
688
    case 5: /* task gate */
689
        /* must do that check here to return the correct error code */
690
        if (!(e2 & DESC_P_MASK))
691
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
692
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
693
        if (has_error_code) {
694
            int type;
695
            uint32_t mask;
696
            /* push the error code */
697
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
698
            shift = type >> 3;
699
            if (env->segs[R_SS].flags & DESC_B_MASK)
700
                mask = 0xffffffff;
701
            else
702
                mask = 0xffff;
703
            esp = (ESP - (2 << shift)) & mask;
704
            ssp = env->segs[R_SS].base + esp;
705
            if (shift)
706
                stl_kernel(ssp, error_code);
707
            else
708
                stw_kernel(ssp, error_code);
709
            SET_ESP(esp, mask);
710
        }
711
        return;
712
    case 6: /* 286 interrupt gate */
713
    case 7: /* 286 trap gate */
714
    case 14: /* 386 interrupt gate */
715
    case 15: /* 386 trap gate */
716
        break;
717
    default:
718
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
719
        break;
720
    }
721
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
722
    cpl = env->hflags & HF_CPL_MASK;
723
    /* check privilege if software int */
724
    if (is_int && dpl < cpl)
725
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
726
    /* check valid bit */
727
    if (!(e2 & DESC_P_MASK))
728
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
729
    selector = e1 >> 16;
730
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
731
    if ((selector & 0xfffc) == 0)
732
        raise_exception_err(EXCP0D_GPF, 0);
733

    
734
    if (load_segment(&e1, &e2, selector) != 0)
735
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
736
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
737
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
738
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
739
    if (dpl > cpl)
740
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741
    if (!(e2 & DESC_P_MASK))
742
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
743
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
744
        /* to inner privilege */
745
        get_ss_esp_from_tss(&ss, &esp, dpl);
746
        if ((ss & 0xfffc) == 0)
747
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
748
        if ((ss & 3) != dpl)
749
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
750
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
751
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
752
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
753
        if (ss_dpl != dpl)
754
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
755
        if (!(ss_e2 & DESC_S_MASK) ||
756
            (ss_e2 & DESC_CS_MASK) ||
757
            !(ss_e2 & DESC_W_MASK))
758
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
759
        if (!(ss_e2 & DESC_P_MASK))
760
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
761
        new_stack = 1;
762
        sp_mask = get_sp_mask(ss_e2);
763
        ssp = get_seg_base(ss_e1, ss_e2);
764
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
765
        /* to same privilege */
766
        if (env->eflags & VM_MASK)
767
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
768
        new_stack = 0;
769
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
770
        ssp = env->segs[R_SS].base;
771
        esp = ESP;
772
        dpl = cpl;
773
    } else {
774
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
775
        new_stack = 0; /* avoid warning */
776
        sp_mask = 0; /* avoid warning */
777
        ssp = 0; /* avoid warning */
778
        esp = 0; /* avoid warning */
779
    }
780

    
781
    shift = type >> 3;
782

    
783
#if 0
784
    /* XXX: check that enough room is available */
785
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
786
    if (env->eflags & VM_MASK)
787
        push_size += 8;
788
    push_size <<= shift;
789
#endif
790
    if (shift == 1) {
791
        if (new_stack) {
792
            if (env->eflags & VM_MASK) {
793
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
794
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
795
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
796
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
797
            }
798
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
799
            PUSHL(ssp, esp, sp_mask, ESP);
800
        }
801
        PUSHL(ssp, esp, sp_mask, compute_eflags());
802
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
803
        PUSHL(ssp, esp, sp_mask, old_eip);
804
        if (has_error_code) {
805
            PUSHL(ssp, esp, sp_mask, error_code);
806
        }
807
    } else {
808
        if (new_stack) {
809
            if (env->eflags & VM_MASK) {
810
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
811
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
812
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
813
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
814
            }
815
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
816
            PUSHW(ssp, esp, sp_mask, ESP);
817
        }
818
        PUSHW(ssp, esp, sp_mask, compute_eflags());
819
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
820
        PUSHW(ssp, esp, sp_mask, old_eip);
821
        if (has_error_code) {
822
            PUSHW(ssp, esp, sp_mask, error_code);
823
        }
824
    }
825

    
826
    if (new_stack) {
827
        if (env->eflags & VM_MASK) {
828
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
829
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
830
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
831
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
832
        }
833
        ss = (ss & ~3) | dpl;
834
        cpu_x86_load_seg_cache(env, R_SS, ss,
835
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
836
    }
837
    SET_ESP(esp, sp_mask);
838

    
839
    selector = (selector & ~3) | dpl;
840
    cpu_x86_load_seg_cache(env, R_CS, selector,
841
                   get_seg_base(e1, e2),
842
                   get_seg_limit(e1, e2),
843
                   e2);
844
    cpu_x86_set_cpl(env, dpl);
845
    env->eip = offset;
846

    
847
    /* interrupt gate clear IF mask */
848
    if ((type & 1) == 0) {
849
        env->eflags &= ~IF_MASK;
850
    }
851
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
852
}
853

    
854
#ifdef TARGET_X86_64
855

    
856
#define PUSHQ(sp, val)\
857
{\
858
    sp -= 8;\
859
    stq_kernel(sp, (val));\
860
}
861

    
862
#define POPQ(sp, val)\
863
{\
864
    val = ldq_kernel(sp);\
865
    sp += 8;\
866
}
867

    
868
static inline target_ulong get_rsp_from_tss(int level)
869
{
870
    int index;
871

    
872
#if 0
873
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
874
           env->tr.base, env->tr.limit);
875
#endif
876

    
877
    if (!(env->tr.flags & DESC_P_MASK))
878
        cpu_abort(env, "invalid tss");
879
    index = 8 * level + 4;
880
    if ((index + 7) > env->tr.limit)
881
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
882
    return ldq_kernel(env->tr.base + index);
883
}
884

    
885
/* 64 bit interrupt */
886
static void do_interrupt64(int intno, int is_int, int error_code,
887
                           target_ulong next_eip, int is_hw)
888
{
889
    SegmentCache *dt;
890
    target_ulong ptr;
891
    int type, dpl, selector, cpl, ist;
892
    int has_error_code, new_stack;
893
    uint32_t e1, e2, e3, ss;
894
    target_ulong old_eip, esp, offset;
895

    
896
    has_error_code = 0;
897
    if (!is_int && !is_hw)
898
        has_error_code = exeption_has_error_code(intno);
899
    if (is_int)
900
        old_eip = next_eip;
901
    else
902
        old_eip = env->eip;
903

    
904
    dt = &env->idt;
905
    if (intno * 16 + 15 > dt->limit)
906
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
907
    ptr = dt->base + intno * 16;
908
    e1 = ldl_kernel(ptr);
909
    e2 = ldl_kernel(ptr + 4);
910
    e3 = ldl_kernel(ptr + 8);
911
    /* check gate type */
912
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
913
    switch(type) {
914
    case 14: /* 386 interrupt gate */
915
    case 15: /* 386 trap gate */
916
        break;
917
    default:
918
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
919
        break;
920
    }
921
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
922
    cpl = env->hflags & HF_CPL_MASK;
923
    /* check privilege if software int */
924
    if (is_int && dpl < cpl)
925
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
926
    /* check valid bit */
927
    if (!(e2 & DESC_P_MASK))
928
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
929
    selector = e1 >> 16;
930
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
931
    ist = e2 & 7;
932
    if ((selector & 0xfffc) == 0)
933
        raise_exception_err(EXCP0D_GPF, 0);
934

    
935
    if (load_segment(&e1, &e2, selector) != 0)
936
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
937
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
938
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
940
    if (dpl > cpl)
941
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
942
    if (!(e2 & DESC_P_MASK))
943
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
944
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
945
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
946
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
947
        /* to inner privilege */
948
        if (ist != 0)
949
            esp = get_rsp_from_tss(ist + 3);
950
        else
951
            esp = get_rsp_from_tss(dpl);
952
        esp &= ~0xfLL; /* align stack */
953
        ss = 0;
954
        new_stack = 1;
955
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
956
        /* to same privilege */
957
        if (env->eflags & VM_MASK)
958
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
959
        new_stack = 0;
960
        if (ist != 0)
961
            esp = get_rsp_from_tss(ist + 3);
962
        else
963
            esp = ESP;
964
        esp &= ~0xfLL; /* align stack */
965
        dpl = cpl;
966
    } else {
967
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
968
        new_stack = 0; /* avoid warning */
969
        esp = 0; /* avoid warning */
970
    }
971

    
972
    PUSHQ(esp, env->segs[R_SS].selector);
973
    PUSHQ(esp, ESP);
974
    PUSHQ(esp, compute_eflags());
975
    PUSHQ(esp, env->segs[R_CS].selector);
976
    PUSHQ(esp, old_eip);
977
    if (has_error_code) {
978
        PUSHQ(esp, error_code);
979
    }
980

    
981
    if (new_stack) {
982
        ss = 0 | dpl;
983
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
984
    }
985
    ESP = esp;
986

    
987
    selector = (selector & ~3) | dpl;
988
    cpu_x86_load_seg_cache(env, R_CS, selector,
989
                   get_seg_base(e1, e2),
990
                   get_seg_limit(e1, e2),
991
                   e2);
992
    cpu_x86_set_cpl(env, dpl);
993
    env->eip = offset;
994

    
995
    /* interrupt gate clear IF mask */
996
    if ((type & 1) == 0) {
997
        env->eflags &= ~IF_MASK;
998
    }
999
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1000
}
1001
#endif
1002

    
1003
#ifdef TARGET_X86_64
1004
#if defined(CONFIG_USER_ONLY)
1005
void helper_syscall(int next_eip_addend)
1006
{
1007
    env->exception_index = EXCP_SYSCALL;
1008
    env->exception_next_eip = env->eip + next_eip_addend;
1009
    cpu_loop_exit();
1010
}
1011
#else
1012
void helper_syscall(int next_eip_addend)
1013
{
1014
    int selector;
1015

    
1016
    if (!(env->efer & MSR_EFER_SCE)) {
1017
        raise_exception_err(EXCP06_ILLOP, 0);
1018
    }
1019
    selector = (env->star >> 32) & 0xffff;
1020
    if (env->hflags & HF_LMA_MASK) {
1021
        int code64;
1022

    
1023
        ECX = env->eip + next_eip_addend;
1024
        env->regs[11] = compute_eflags();
1025

    
1026
        code64 = env->hflags & HF_CS64_MASK;
1027

    
1028
        cpu_x86_set_cpl(env, 0);
1029
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1030
                           0, 0xffffffff,
1031
                               DESC_G_MASK | DESC_P_MASK |
1032
                               DESC_S_MASK |
1033
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1034
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1035
                               0, 0xffffffff,
1036
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1037
                               DESC_S_MASK |
1038
                               DESC_W_MASK | DESC_A_MASK);
1039
        env->eflags &= ~env->fmask;
1040
        load_eflags(env->eflags, 0);
1041
        if (code64)
1042
            env->eip = env->lstar;
1043
        else
1044
            env->eip = env->cstar;
1045
    } else {
1046
        ECX = (uint32_t)(env->eip + next_eip_addend);
1047

    
1048
        cpu_x86_set_cpl(env, 0);
1049
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1050
                           0, 0xffffffff,
1051
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1052
                               DESC_S_MASK |
1053
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1054
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1055
                               0, 0xffffffff,
1056
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1057
                               DESC_S_MASK |
1058
                               DESC_W_MASK | DESC_A_MASK);
1059
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1060
        env->eip = (uint32_t)env->star;
1061
    }
1062
}
1063
#endif
1064
#endif
1065

    
1066
#ifdef TARGET_X86_64
1067
void helper_sysret(int dflag)
1068
{
1069
    int cpl, selector;
1070

    
1071
    if (!(env->efer & MSR_EFER_SCE)) {
1072
        raise_exception_err(EXCP06_ILLOP, 0);
1073
    }
1074
    cpl = env->hflags & HF_CPL_MASK;
1075
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1076
        raise_exception_err(EXCP0D_GPF, 0);
1077
    }
1078
    selector = (env->star >> 48) & 0xffff;
1079
    if (env->hflags & HF_LMA_MASK) {
1080
        if (dflag == 2) {
1081
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1082
                                   0, 0xffffffff,
1083
                                   DESC_G_MASK | DESC_P_MASK |
1084
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1085
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1086
                                   DESC_L_MASK);
1087
            env->eip = ECX;
1088
        } else {
1089
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1090
                                   0, 0xffffffff,
1091
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1092
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1093
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1094
            env->eip = (uint32_t)ECX;
1095
        }
1096
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1097
                               0, 0xffffffff,
1098
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1099
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1100
                               DESC_W_MASK | DESC_A_MASK);
1101
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1102
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1103
        cpu_x86_set_cpl(env, 3);
1104
    } else {
1105
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1106
                               0, 0xffffffff,
1107
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1108
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1109
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1110
        env->eip = (uint32_t)ECX;
1111
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1112
                               0, 0xffffffff,
1113
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1114
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1115
                               DESC_W_MASK | DESC_A_MASK);
1116
        env->eflags |= IF_MASK;
1117
        cpu_x86_set_cpl(env, 3);
1118
    }
1119
}
1120
#endif
1121

    
1122
/* real mode interrupt */
1123
static void do_interrupt_real(int intno, int is_int, int error_code,
1124
                              unsigned int next_eip)
1125
{
1126
    SegmentCache *dt;
1127
    target_ulong ptr, ssp;
1128
    int selector;
1129
    uint32_t offset, esp;
1130
    uint32_t old_cs, old_eip;
1131

    
1132
    /* real mode (simpler !) */
1133
    dt = &env->idt;
1134
    if (intno * 4 + 3 > dt->limit)
1135
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1136
    ptr = dt->base + intno * 4;
1137
    offset = lduw_kernel(ptr);
1138
    selector = lduw_kernel(ptr + 2);
1139
    esp = ESP;
1140
    ssp = env->segs[R_SS].base;
1141
    if (is_int)
1142
        old_eip = next_eip;
1143
    else
1144
        old_eip = env->eip;
1145
    old_cs = env->segs[R_CS].selector;
1146
    /* XXX: use SS segment size ? */
1147
    PUSHW(ssp, esp, 0xffff, compute_eflags());
1148
    PUSHW(ssp, esp, 0xffff, old_cs);
1149
    PUSHW(ssp, esp, 0xffff, old_eip);
1150

    
1151
    /* update processor state */
1152
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
1153
    env->eip = offset;
1154
    env->segs[R_CS].selector = selector;
1155
    env->segs[R_CS].base = (selector << 4);
1156
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1157
}
1158

    
1159
/* fake user mode interrupt */
1160
void do_interrupt_user(int intno, int is_int, int error_code,
1161
                       target_ulong next_eip)
1162
{
1163
    SegmentCache *dt;
1164
    target_ulong ptr;
1165
    int dpl, cpl, shift;
1166
    uint32_t e2;
1167

    
1168
    dt = &env->idt;
1169
    if (env->hflags & HF_LMA_MASK) {
1170
        shift = 4;
1171
    } else {
1172
        shift = 3;
1173
    }
1174
    ptr = dt->base + (intno << shift);
1175
    e2 = ldl_kernel(ptr + 4);
1176

    
1177
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1178
    cpl = env->hflags & HF_CPL_MASK;
1179
    /* check privilege if software int */
1180
    if (is_int && dpl < cpl)
1181
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1182

    
1183
    /* Since we emulate only user space, we cannot do more than
1184
       exiting the emulation with the suitable exception and error
1185
       code */
1186
    if (is_int)
1187
        EIP = next_eip;
1188
}
1189

    
1190
#if !defined(CONFIG_USER_ONLY)
1191
static void handle_even_inj(int intno, int is_int, int error_code,
1192
                int is_hw, int rm)
1193
{
1194
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1195
    if (!(event_inj & SVM_EVTINJ_VALID)) {
1196
            int type;
1197
            if (is_int)
1198
                    type = SVM_EVTINJ_TYPE_SOFT;
1199
            else
1200
                    type = SVM_EVTINJ_TYPE_EXEPT;
1201
            event_inj = intno | type | SVM_EVTINJ_VALID;
1202
            if (!rm && exeption_has_error_code(intno)) {
1203
                    event_inj |= SVM_EVTINJ_VALID_ERR;
1204
                    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1205
            }
1206
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1207
    }
1208
}
1209
#endif
1210

    
1211
/*
1212
 * Begin execution of an interruption. is_int is TRUE if coming from
1213
 * the int instruction. next_eip is the EIP value AFTER the interrupt
1214
 * instruction. It is only relevant if is_int is TRUE.
1215
 */
1216
void do_interrupt(int intno, int is_int, int error_code,
1217
                  target_ulong next_eip, int is_hw)
1218
{
1219
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
1220
        if ((env->cr[0] & CR0_PE_MASK)) {
1221
            static int count;
1222
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1223
                    count, intno, error_code, is_int,
1224
                    env->hflags & HF_CPL_MASK,
1225
                    env->segs[R_CS].selector, EIP,
1226
                    (int)env->segs[R_CS].base + EIP,
1227
                    env->segs[R_SS].selector, ESP);
1228
            if (intno == 0x0e) {
1229
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1230
            } else {
1231
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1232
            }
1233
            qemu_log("\n");
1234
            log_cpu_state(env, X86_DUMP_CCOP);
1235
#if 0
1236
            {
1237
                int i;
1238
                target_ulong ptr;
1239
                qemu_log("       code=");
1240
                ptr = env->segs[R_CS].base + env->eip;
1241
                for(i = 0; i < 16; i++) {
1242
                    qemu_log(" %02x", ldub(ptr + i));
1243
                }
1244
                qemu_log("\n");
1245
            }
1246
#endif
1247
            count++;
1248
        }
1249
    }
1250
    if (env->cr[0] & CR0_PE_MASK) {
1251
#if !defined(CONFIG_USER_ONLY)
1252
        if (env->hflags & HF_SVMI_MASK)
1253
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
1254
#endif
1255
#ifdef TARGET_X86_64
1256
        if (env->hflags & HF_LMA_MASK) {
1257
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1258
        } else
1259
#endif
1260
        {
1261
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1262
        }
1263
    } else {
1264
#if !defined(CONFIG_USER_ONLY)
1265
        if (env->hflags & HF_SVMI_MASK)
1266
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
1267
#endif
1268
        do_interrupt_real(intno, is_int, error_code, next_eip);
1269
    }
1270

    
1271
#if !defined(CONFIG_USER_ONLY)
1272
    if (env->hflags & HF_SVMI_MASK) {
1273
            uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1274
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1275
    }
1276
#endif
1277
}
1278

    
1279
/* This should come from sysemu.h - if we could include it here... */
1280
void qemu_system_reset_request(void);
1281

    
1282
/*
1283
 * Check nested exceptions and change to double or triple fault if
1284
 * needed. It should only be called, if this is not an interrupt.
1285
 * Returns the new exception number.
1286
 */
1287
static int check_exception(int intno, int *error_code)
1288
{
1289
    int first_contributory = env->old_exception == 0 ||
1290
                              (env->old_exception >= 10 &&
1291
                               env->old_exception <= 13);
1292
    int second_contributory = intno == 0 ||
1293
                               (intno >= 10 && intno <= 13);
1294

    
1295
    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1296
                env->old_exception, intno);
1297

    
1298
#if !defined(CONFIG_USER_ONLY)
1299
    if (env->old_exception == EXCP08_DBLE) {
1300
        if (env->hflags & HF_SVMI_MASK)
1301
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1302

    
1303
        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1304

    
1305
        qemu_system_reset_request();
1306
        return EXCP_HLT;
1307
    }
1308
#endif
1309

    
1310
    if ((first_contributory && second_contributory)
1311
        || (env->old_exception == EXCP0E_PAGE &&
1312
            (second_contributory || (intno == EXCP0E_PAGE)))) {
1313
        intno = EXCP08_DBLE;
1314
        *error_code = 0;
1315
    }
1316

    
1317
    if (second_contributory || (intno == EXCP0E_PAGE) ||
1318
        (intno == EXCP08_DBLE))
1319
        env->old_exception = intno;
1320

    
1321
    return intno;
1322
}
1323

    
1324
/*
1325
 * Signal an interruption. It is executed in the main CPU loop.
1326
 * is_int is TRUE if coming from the int instruction. next_eip is the
1327
 * EIP value AFTER the interrupt instruction. It is only relevant if
1328
 * is_int is TRUE.
1329
 */
1330
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1331
                                          int next_eip_addend)
1332
{
1333
    if (!is_int) {
1334
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1335
        intno = check_exception(intno, &error_code);
1336
    } else {
1337
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1338
    }
1339

    
1340
    env->exception_index = intno;
1341
    env->error_code = error_code;
1342
    env->exception_is_int = is_int;
1343
    env->exception_next_eip = env->eip + next_eip_addend;
1344
    cpu_loop_exit();
1345
}
1346

    
1347
/* shortcuts to generate exceptions */
1348

    
1349
void raise_exception_err(int exception_index, int error_code)
1350
{
1351
    raise_interrupt(exception_index, 0, error_code, 0);
1352
}
1353

    
1354
void raise_exception(int exception_index)
1355
{
1356
    raise_interrupt(exception_index, 0, 0, 0);
1357
}
1358

    
1359
void raise_exception_env(int exception_index, CPUState *nenv)
1360
{
1361
    env = nenv;
1362
    raise_exception(exception_index);
1363
}
1364
/* SMM support */
1365

    
1366
#if defined(CONFIG_USER_ONLY)
1367

    
1368
void do_smm_enter(void)
1369
{
1370
}
1371

    
1372
void helper_rsm(void)
1373
{
1374
}
1375

    
1376
#else
1377

    
1378
#ifdef TARGET_X86_64
1379
#define SMM_REVISION_ID 0x00020064
1380
#else
1381
#define SMM_REVISION_ID 0x00020000
1382
#endif
1383

    
1384
void do_smm_enter(void)
1385
{
1386
    target_ulong sm_state;
1387
    SegmentCache *dt;
1388
    int i, offset;
1389

    
1390
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1391
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1392

    
1393
    env->hflags |= HF_SMM_MASK;
1394
    cpu_smm_update(env);
1395

    
1396
    sm_state = env->smbase + 0x8000;
1397

    
1398
#ifdef TARGET_X86_64
1399
    for(i = 0; i < 6; i++) {
1400
        dt = &env->segs[i];
1401
        offset = 0x7e00 + i * 16;
1402
        stw_phys(sm_state + offset, dt->selector);
1403
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1404
        stl_phys(sm_state + offset + 4, dt->limit);
1405
        stq_phys(sm_state + offset + 8, dt->base);
1406
    }
1407

    
1408
    stq_phys(sm_state + 0x7e68, env->gdt.base);
1409
    stl_phys(sm_state + 0x7e64, env->gdt.limit);
1410

    
1411
    stw_phys(sm_state + 0x7e70, env->ldt.selector);
1412
    stq_phys(sm_state + 0x7e78, env->ldt.base);
1413
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
1414
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1415

    
1416
    stq_phys(sm_state + 0x7e88, env->idt.base);
1417
    stl_phys(sm_state + 0x7e84, env->idt.limit);
1418

    
1419
    stw_phys(sm_state + 0x7e90, env->tr.selector);
1420
    stq_phys(sm_state + 0x7e98, env->tr.base);
1421
    stl_phys(sm_state + 0x7e94, env->tr.limit);
1422
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1423

    
1424
    stq_phys(sm_state + 0x7ed0, env->efer);
1425

    
1426
    stq_phys(sm_state + 0x7ff8, EAX);
1427
    stq_phys(sm_state + 0x7ff0, ECX);
1428
    stq_phys(sm_state + 0x7fe8, EDX);
1429
    stq_phys(sm_state + 0x7fe0, EBX);
1430
    stq_phys(sm_state + 0x7fd8, ESP);
1431
    stq_phys(sm_state + 0x7fd0, EBP);
1432
    stq_phys(sm_state + 0x7fc8, ESI);
1433
    stq_phys(sm_state + 0x7fc0, EDI);
1434
    for(i = 8; i < 16; i++)
1435
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1436
    stq_phys(sm_state + 0x7f78, env->eip);
1437
    stl_phys(sm_state + 0x7f70, compute_eflags());
1438
    stl_phys(sm_state + 0x7f68, env->dr[6]);
1439
    stl_phys(sm_state + 0x7f60, env->dr[7]);
1440

    
1441
    stl_phys(sm_state + 0x7f48, env->cr[4]);
1442
    stl_phys(sm_state + 0x7f50, env->cr[3]);
1443
    stl_phys(sm_state + 0x7f58, env->cr[0]);
1444

    
1445
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1446
    stl_phys(sm_state + 0x7f00, env->smbase);
1447
#else
1448
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
1449
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
1450
    stl_phys(sm_state + 0x7ff4, compute_eflags());
1451
    stl_phys(sm_state + 0x7ff0, env->eip);
1452
    stl_phys(sm_state + 0x7fec, EDI);
1453
    stl_phys(sm_state + 0x7fe8, ESI);
1454
    stl_phys(sm_state + 0x7fe4, EBP);
1455
    stl_phys(sm_state + 0x7fe0, ESP);
1456
    stl_phys(sm_state + 0x7fdc, EBX);
1457
    stl_phys(sm_state + 0x7fd8, EDX);
1458
    stl_phys(sm_state + 0x7fd4, ECX);
1459
    stl_phys(sm_state + 0x7fd0, EAX);
1460
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
1461
    stl_phys(sm_state + 0x7fc8, env->dr[7]);
1462

    
1463
    stl_phys(sm_state + 0x7fc4, env->tr.selector);
1464
    stl_phys(sm_state + 0x7f64, env->tr.base);
1465
    stl_phys(sm_state + 0x7f60, env->tr.limit);
1466
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1467

    
1468
    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1469
    stl_phys(sm_state + 0x7f80, env->ldt.base);
1470
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1471
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1472

    
1473
    stl_phys(sm_state + 0x7f74, env->gdt.base);
1474
    stl_phys(sm_state + 0x7f70, env->gdt.limit);
1475

    
1476
    stl_phys(sm_state + 0x7f58, env->idt.base);
1477
    stl_phys(sm_state + 0x7f54, env->idt.limit);
1478

    
1479
    for(i = 0; i < 6; i++) {
1480
        dt = &env->segs[i];
1481
        if (i < 3)
1482
            offset = 0x7f84 + i * 12;
1483
        else
1484
            offset = 0x7f2c + (i - 3) * 12;
1485
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1486
        stl_phys(sm_state + offset + 8, dt->base);
1487
        stl_phys(sm_state + offset + 4, dt->limit);
1488
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1489
    }
1490
    stl_phys(sm_state + 0x7f14, env->cr[4]);
1491

    
1492
    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1493
    stl_phys(sm_state + 0x7ef8, env->smbase);
1494
#endif
1495
    /* init SMM cpu state */
1496

    
1497
#ifdef TARGET_X86_64
1498
    cpu_load_efer(env, 0);
1499
#endif
1500
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1501
    env->eip = 0x00008000;
1502
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1503
                           0xffffffff, 0);
1504
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1505
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1506
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1507
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1508
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1509

    
1510
    cpu_x86_update_cr0(env,
1511
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1512
    cpu_x86_update_cr4(env, 0);
1513
    env->dr[7] = 0x00000400;
1514
    CC_OP = CC_OP_EFLAGS;
1515
}
1516

    
1517
void helper_rsm(void)
1518
{
1519
    target_ulong sm_state;
1520
    int i, offset;
1521
    uint32_t val;
1522

    
1523
    sm_state = env->smbase + 0x8000;
1524
#ifdef TARGET_X86_64
1525
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1526

    
1527
    for(i = 0; i < 6; i++) {
1528
        offset = 0x7e00 + i * 16;
1529
        cpu_x86_load_seg_cache(env, i,
1530
                               lduw_phys(sm_state + offset),
1531
                               ldq_phys(sm_state + offset + 8),
1532
                               ldl_phys(sm_state + offset + 4),
1533
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1534
    }
1535

    
1536
    env->gdt.base = ldq_phys(sm_state + 0x7e68);
1537
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1538

    
1539
    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1540
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
1541
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1542
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1543

    
1544
    env->idt.base = ldq_phys(sm_state + 0x7e88);
1545
    env->idt.limit = ldl_phys(sm_state + 0x7e84);
1546

    
1547
    env->tr.selector = lduw_phys(sm_state + 0x7e90);
1548
    env->tr.base = ldq_phys(sm_state + 0x7e98);
1549
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
1550
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1551

    
1552
    EAX = ldq_phys(sm_state + 0x7ff8);
1553
    ECX = ldq_phys(sm_state + 0x7ff0);
1554
    EDX = ldq_phys(sm_state + 0x7fe8);
1555
    EBX = ldq_phys(sm_state + 0x7fe0);
1556
    ESP = ldq_phys(sm_state + 0x7fd8);
1557
    EBP = ldq_phys(sm_state + 0x7fd0);
1558
    ESI = ldq_phys(sm_state + 0x7fc8);
1559
    EDI = ldq_phys(sm_state + 0x7fc0);
1560
    for(i = 8; i < 16; i++)
1561
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1562
    env->eip = ldq_phys(sm_state + 0x7f78);
1563
    load_eflags(ldl_phys(sm_state + 0x7f70),
1564
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1565
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
1566
    env->dr[7] = ldl_phys(sm_state + 0x7f60);
1567

    
1568
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1569
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1570
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1571

    
1572
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1573
    if (val & 0x20000) {
1574
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1575
    }
1576
#else
1577
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1578
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1579
    load_eflags(ldl_phys(sm_state + 0x7ff4),
1580
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1581
    env->eip = ldl_phys(sm_state + 0x7ff0);
1582
    EDI = ldl_phys(sm_state + 0x7fec);
1583
    ESI = ldl_phys(sm_state + 0x7fe8);
1584
    EBP = ldl_phys(sm_state + 0x7fe4);
1585
    ESP = ldl_phys(sm_state + 0x7fe0);
1586
    EBX = ldl_phys(sm_state + 0x7fdc);
1587
    EDX = ldl_phys(sm_state + 0x7fd8);
1588
    ECX = ldl_phys(sm_state + 0x7fd4);
1589
    EAX = ldl_phys(sm_state + 0x7fd0);
1590
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1591
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1592

    
1593
    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1594
    env->tr.base = ldl_phys(sm_state + 0x7f64);
1595
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
1596
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1597

    
1598
    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1599
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
1600
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1601
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1602

    
1603
    env->gdt.base = ldl_phys(sm_state + 0x7f74);
1604
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1605

    
1606
    env->idt.base = ldl_phys(sm_state + 0x7f58);
1607
    env->idt.limit = ldl_phys(sm_state + 0x7f54);
1608

    
1609
    for(i = 0; i < 6; i++) {
1610
        if (i < 3)
1611
            offset = 0x7f84 + i * 12;
1612
        else
1613
            offset = 0x7f2c + (i - 3) * 12;
1614
        cpu_x86_load_seg_cache(env, i,
1615
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1616
                               ldl_phys(sm_state + offset + 8),
1617
                               ldl_phys(sm_state + offset + 4),
1618
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1619
    }
1620
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1621

    
1622
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1623
    if (val & 0x20000) {
1624
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1625
    }
1626
#endif
1627
    CC_OP = CC_OP_EFLAGS;
1628
    env->hflags &= ~HF_SMM_MASK;
1629
    cpu_smm_update(env);
1630

    
1631
    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1632
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1633
}
1634

    
1635
#endif /* !CONFIG_USER_ONLY */
1636

    
1637

    
1638
/* division, flags are undefined */
1639

    
1640
void helper_divb_AL(target_ulong t0)
1641
{
1642
    unsigned int num, den, q, r;
1643

    
1644
    num = (EAX & 0xffff);
1645
    den = (t0 & 0xff);
1646
    if (den == 0) {
1647
        raise_exception(EXCP00_DIVZ);
1648
    }
1649
    q = (num / den);
1650
    if (q > 0xff)
1651
        raise_exception(EXCP00_DIVZ);
1652
    q &= 0xff;
1653
    r = (num % den) & 0xff;
1654
    EAX = (EAX & ~0xffff) | (r << 8) | q;
1655
}
1656

    
1657
void helper_idivb_AL(target_ulong t0)
1658
{
1659
    int num, den, q, r;
1660

    
1661
    num = (int16_t)EAX;
1662
    den = (int8_t)t0;
1663
    if (den == 0) {
1664
        raise_exception(EXCP00_DIVZ);
1665
    }
1666
    q = (num / den);
1667
    if (q != (int8_t)q)
1668
        raise_exception(EXCP00_DIVZ);
1669
    q &= 0xff;
1670
    r = (num % den) & 0xff;
1671
    EAX = (EAX & ~0xffff) | (r << 8) | q;
1672
}
1673

    
1674
void helper_divw_AX(target_ulong t0)
1675
{
1676
    unsigned int num, den, q, r;
1677

    
1678
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1679
    den = (t0 & 0xffff);
1680
    if (den == 0) {
1681
        raise_exception(EXCP00_DIVZ);
1682
    }
1683
    q = (num / den);
1684
    if (q > 0xffff)
1685
        raise_exception(EXCP00_DIVZ);
1686
    q &= 0xffff;
1687
    r = (num % den) & 0xffff;
1688
    EAX = (EAX & ~0xffff) | q;
1689
    EDX = (EDX & ~0xffff) | r;
1690
}
1691

    
1692
void helper_idivw_AX(target_ulong t0)
1693
{
1694
    int num, den, q, r;
1695

    
1696
    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1697
    den = (int16_t)t0;
1698
    if (den == 0) {
1699
        raise_exception(EXCP00_DIVZ);
1700
    }
1701
    q = (num / den);
1702
    if (q != (int16_t)q)
1703
        raise_exception(EXCP00_DIVZ);
1704
    q &= 0xffff;
1705
    r = (num % den) & 0xffff;
1706
    EAX = (EAX & ~0xffff) | q;
1707
    EDX = (EDX & ~0xffff) | r;
1708
}
1709

    
1710
void helper_divl_EAX(target_ulong t0)
1711
{
1712
    unsigned int den, r;
1713
    uint64_t num, q;
1714

    
1715
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1716
    den = t0;
1717
    if (den == 0) {
1718
        raise_exception(EXCP00_DIVZ);
1719
    }
1720
    q = (num / den);
1721
    r = (num % den);
1722
    if (q > 0xffffffff)
1723
        raise_exception(EXCP00_DIVZ);
1724
    EAX = (uint32_t)q;
1725
    EDX = (uint32_t)r;
1726
}
1727

    
1728
void helper_idivl_EAX(target_ulong t0)
1729
{
1730
    int den, r;
1731
    int64_t num, q;
1732

    
1733
    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1734
    den = t0;
1735
    if (den == 0) {
1736
        raise_exception(EXCP00_DIVZ);
1737
    }
1738
    q = (num / den);
1739
    r = (num % den);
1740
    if (q != (int32_t)q)
1741
        raise_exception(EXCP00_DIVZ);
1742
    EAX = (uint32_t)q;
1743
    EDX = (uint32_t)r;
1744
}
1745

    
1746
/* bcd */
1747

    
1748
/* XXX: exception */
1749
void helper_aam(int base)
1750
{
1751
    int al, ah;
1752
    al = EAX & 0xff;
1753
    ah = al / base;
1754
    al = al % base;
1755
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1756
    CC_DST = al;
1757
}
1758

    
1759
void helper_aad(int base)
1760
{
1761
    int al, ah;
1762
    al = EAX & 0xff;
1763
    ah = (EAX >> 8) & 0xff;
1764
    al = ((ah * base) + al) & 0xff;
1765
    EAX = (EAX & ~0xffff) | al;
1766
    CC_DST = al;
1767
}
1768

    
1769
void helper_aaa(void)
1770
{
1771
    int icarry;
1772
    int al, ah, af;
1773
    int eflags;
1774

    
1775
    eflags = helper_cc_compute_all(CC_OP);
1776
    af = eflags & CC_A;
1777
    al = EAX & 0xff;
1778
    ah = (EAX >> 8) & 0xff;
1779

    
1780
    icarry = (al > 0xf9);
1781
    if (((al & 0x0f) > 9 ) || af) {
1782
        al = (al + 6) & 0x0f;
1783
        ah = (ah + 1 + icarry) & 0xff;
1784
        eflags |= CC_C | CC_A;
1785
    } else {
1786
        eflags &= ~(CC_C | CC_A);
1787
        al &= 0x0f;
1788
    }
1789
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1790
    CC_SRC = eflags;
1791
}
1792

    
1793
void helper_aas(void)
1794
{
1795
    int icarry;
1796
    int al, ah, af;
1797
    int eflags;
1798

    
1799
    eflags = helper_cc_compute_all(CC_OP);
1800
    af = eflags & CC_A;
1801
    al = EAX & 0xff;
1802
    ah = (EAX >> 8) & 0xff;
1803

    
1804
    icarry = (al < 6);
1805
    if (((al & 0x0f) > 9 ) || af) {
1806
        al = (al - 6) & 0x0f;
1807
        ah = (ah - 1 - icarry) & 0xff;
1808
        eflags |= CC_C | CC_A;
1809
    } else {
1810
        eflags &= ~(CC_C | CC_A);
1811
        al &= 0x0f;
1812
    }
1813
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1814
    CC_SRC = eflags;
1815
}
1816

    
1817
void helper_daa(void)
1818
{
1819
    int al, af, cf;
1820
    int eflags;
1821

    
1822
    eflags = helper_cc_compute_all(CC_OP);
1823
    cf = eflags & CC_C;
1824
    af = eflags & CC_A;
1825
    al = EAX & 0xff;
1826

    
1827
    eflags = 0;
1828
    if (((al & 0x0f) > 9 ) || af) {
1829
        al = (al + 6) & 0xff;
1830
        eflags |= CC_A;
1831
    }
1832
    if ((al > 0x9f) || cf) {
1833
        al = (al + 0x60) & 0xff;
1834
        eflags |= CC_C;
1835
    }
1836
    EAX = (EAX & ~0xff) | al;
1837
    /* well, speed is not an issue here, so we compute the flags by hand */
1838
    eflags |= (al == 0) << 6; /* zf */
1839
    eflags |= parity_table[al]; /* pf */
1840
    eflags |= (al & 0x80); /* sf */
1841
    CC_SRC = eflags;
1842
}
1843

    
1844
void helper_das(void)
1845
{
1846
    int al, al1, af, cf;
1847
    int eflags;
1848

    
1849
    eflags = helper_cc_compute_all(CC_OP);
1850
    cf = eflags & CC_C;
1851
    af = eflags & CC_A;
1852
    al = EAX & 0xff;
1853

    
1854
    eflags = 0;
1855
    al1 = al;
1856
    if (((al & 0x0f) > 9 ) || af) {
1857
        eflags |= CC_A;
1858
        if (al < 6 || cf)
1859
            eflags |= CC_C;
1860
        al = (al - 6) & 0xff;
1861
    }
1862
    if ((al1 > 0x99) || cf) {
1863
        al = (al - 0x60) & 0xff;
1864
        eflags |= CC_C;
1865
    }
1866
    EAX = (EAX & ~0xff) | al;
1867
    /* well, speed is not an issue here, so we compute the flags by hand */
1868
    eflags |= (al == 0) << 6; /* zf */
1869
    eflags |= parity_table[al]; /* pf */
1870
    eflags |= (al & 0x80); /* sf */
1871
    CC_SRC = eflags;
1872
}
1873

    
1874
void helper_into(int next_eip_addend)
1875
{
1876
    int eflags;
1877
    eflags = helper_cc_compute_all(CC_OP);
1878
    if (eflags & CC_O) {
1879
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1880
    }
1881
}
1882

    
1883
void helper_cmpxchg8b(target_ulong a0)
1884
{
1885
    uint64_t d;
1886
    int eflags;
1887

    
1888
    eflags = helper_cc_compute_all(CC_OP);
1889
    d = ldq(a0);
1890
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1891
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1892
        eflags |= CC_Z;
1893
    } else {
1894
        /* always do the store */
1895
        stq(a0, d); 
1896
        EDX = (uint32_t)(d >> 32);
1897
        EAX = (uint32_t)d;
1898
        eflags &= ~CC_Z;
1899
    }
1900
    CC_SRC = eflags;
1901
}
1902

    
1903
#ifdef TARGET_X86_64
1904
void helper_cmpxchg16b(target_ulong a0)
1905
{
1906
    uint64_t d0, d1;
1907
    int eflags;
1908

    
1909
    if ((a0 & 0xf) != 0)
1910
        raise_exception(EXCP0D_GPF);
1911
    eflags = helper_cc_compute_all(CC_OP);
1912
    d0 = ldq(a0);
1913
    d1 = ldq(a0 + 8);
1914
    if (d0 == EAX && d1 == EDX) {
1915
        stq(a0, EBX);
1916
        stq(a0 + 8, ECX);
1917
        eflags |= CC_Z;
1918
    } else {
1919
        /* always do the store */
1920
        stq(a0, d0); 
1921
        stq(a0 + 8, d1); 
1922
        EDX = d1;
1923
        EAX = d0;
1924
        eflags &= ~CC_Z;
1925
    }
1926
    CC_SRC = eflags;
1927
}
1928
#endif
1929

    
1930
void helper_single_step(void)
1931
{
1932
#ifndef CONFIG_USER_ONLY
1933
    check_hw_breakpoints(env, 1);
1934
    env->dr[6] |= DR6_BS;
1935
#endif
1936
    raise_exception(EXCP01_DB);
1937
}
1938

    
1939
void helper_cpuid(void)
1940
{
1941
    uint32_t eax, ebx, ecx, edx;
1942

    
1943
    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1944

    
1945
    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1946
    EAX = eax;
1947
    EBX = ebx;
1948
    ECX = ecx;
1949
    EDX = edx;
1950
}
1951

    
1952
void helper_enter_level(int level, int data32, target_ulong t1)
1953
{
1954
    target_ulong ssp;
1955
    uint32_t esp_mask, esp, ebp;
1956

    
1957
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
1958
    ssp = env->segs[R_SS].base;
1959
    ebp = EBP;
1960
    esp = ESP;
1961
    if (data32) {
1962
        /* 32 bit */
1963
        esp -= 4;
1964
        while (--level) {
1965
            esp -= 4;
1966
            ebp -= 4;
1967
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1968
        }
1969
        esp -= 4;
1970
        stl(ssp + (esp & esp_mask), t1);
1971
    } else {
1972
        /* 16 bit */
1973
        esp -= 2;
1974
        while (--level) {
1975
            esp -= 2;
1976
            ebp -= 2;
1977
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1978
        }
1979
        esp -= 2;
1980
        stw(ssp + (esp & esp_mask), t1);
1981
    }
1982
}
1983

    
1984
#ifdef TARGET_X86_64
1985
void helper_enter64_level(int level, int data64, target_ulong t1)
1986
{
1987
    target_ulong esp, ebp;
1988
    ebp = EBP;
1989
    esp = ESP;
1990

    
1991
    if (data64) {
1992
        /* 64 bit */
1993
        esp -= 8;
1994
        while (--level) {
1995
            esp -= 8;
1996
            ebp -= 8;
1997
            stq(esp, ldq(ebp));
1998
        }
1999
        esp -= 8;
2000
        stq(esp, t1);
2001
    } else {
2002
        /* 16 bit */
2003
        esp -= 2;
2004
        while (--level) {
2005
            esp -= 2;
2006
            ebp -= 2;
2007
            stw(esp, lduw(ebp));
2008
        }
2009
        esp -= 2;
2010
        stw(esp, t1);
2011
    }
2012
}
2013
#endif
2014

    
2015
void helper_lldt(int selector)
2016
{
2017
    SegmentCache *dt;
2018
    uint32_t e1, e2;
2019
    int index, entry_limit;
2020
    target_ulong ptr;
2021

    
2022
    selector &= 0xffff;
2023
    if ((selector & 0xfffc) == 0) {
2024
        /* XXX: NULL selector case: invalid LDT */
2025
        env->ldt.base = 0;
2026
        env->ldt.limit = 0;
2027
    } else {
2028
        if (selector & 0x4)
2029
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2030
        dt = &env->gdt;
2031
        index = selector & ~7;
2032
#ifdef TARGET_X86_64
2033
        if (env->hflags & HF_LMA_MASK)
2034
            entry_limit = 15;
2035
        else
2036
#endif
2037
            entry_limit = 7;
2038
        if ((index + entry_limit) > dt->limit)
2039
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2040
        ptr = dt->base + index;
2041
        e1 = ldl_kernel(ptr);
2042
        e2 = ldl_kernel(ptr + 4);
2043
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2044
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2045
        if (!(e2 & DESC_P_MASK))
2046
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2047
#ifdef TARGET_X86_64
2048
        if (env->hflags & HF_LMA_MASK) {
2049
            uint32_t e3;
2050
            e3 = ldl_kernel(ptr + 8);
2051
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2052
            env->ldt.base |= (target_ulong)e3 << 32;
2053
        } else
2054
#endif
2055
        {
2056
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2057
        }
2058
    }
2059
    env->ldt.selector = selector;
2060
}
2061

    
2062
void helper_ltr(int selector)
2063
{
2064
    SegmentCache *dt;
2065
    uint32_t e1, e2;
2066
    int index, type, entry_limit;
2067
    target_ulong ptr;
2068

    
2069
    selector &= 0xffff;
2070
    if ((selector & 0xfffc) == 0) {
2071
        /* NULL selector case: invalid TR */
2072
        env->tr.base = 0;
2073
        env->tr.limit = 0;
2074
        env->tr.flags = 0;
2075
    } else {
2076
        if (selector & 0x4)
2077
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2078
        dt = &env->gdt;
2079
        index = selector & ~7;
2080
#ifdef TARGET_X86_64
2081
        if (env->hflags & HF_LMA_MASK)
2082
            entry_limit = 15;
2083
        else
2084
#endif
2085
            entry_limit = 7;
2086
        if ((index + entry_limit) > dt->limit)
2087
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2088
        ptr = dt->base + index;
2089
        e1 = ldl_kernel(ptr);
2090
        e2 = ldl_kernel(ptr + 4);
2091
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2092
        if ((e2 & DESC_S_MASK) ||
2093
            (type != 1 && type != 9))
2094
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2095
        if (!(e2 & DESC_P_MASK))
2096
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2097
#ifdef TARGET_X86_64
2098
        if (env->hflags & HF_LMA_MASK) {
2099
            uint32_t e3, e4;
2100
            e3 = ldl_kernel(ptr + 8);
2101
            e4 = ldl_kernel(ptr + 12);
2102
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2103
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2105
            env->tr.base |= (target_ulong)e3 << 32;
2106
        } else
2107
#endif
2108
        {
2109
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2110
        }
2111
        e2 |= DESC_TSS_BUSY_MASK;
2112
        stl_kernel(ptr + 4, e2);
2113
    }
2114
    env->tr.selector = selector;
2115
}
2116

    
2117
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2118
void helper_load_seg(int seg_reg, int selector)
2119
{
2120
    uint32_t e1, e2;
2121
    int cpl, dpl, rpl;
2122
    SegmentCache *dt;
2123
    int index;
2124
    target_ulong ptr;
2125

    
2126
    selector &= 0xffff;
2127
    cpl = env->hflags & HF_CPL_MASK;
2128
    if ((selector & 0xfffc) == 0) {
2129
        /* null selector case */
2130
        if (seg_reg == R_SS
2131
#ifdef TARGET_X86_64
2132
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2133
#endif
2134
            )
2135
            raise_exception_err(EXCP0D_GPF, 0);
2136
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2137
    } else {
2138

    
2139
        if (selector & 0x4)
2140
            dt = &env->ldt;
2141
        else
2142
            dt = &env->gdt;
2143
        index = selector & ~7;
2144
        if ((index + 7) > dt->limit)
2145
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2146
        ptr = dt->base + index;
2147
        e1 = ldl_kernel(ptr);
2148
        e2 = ldl_kernel(ptr + 4);
2149

    
2150
        if (!(e2 & DESC_S_MASK))
2151
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2152
        rpl = selector & 3;
2153
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2154
        if (seg_reg == R_SS) {
2155
            /* must be writable segment */
2156
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2157
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2158
            if (rpl != cpl || dpl != cpl)
2159
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2160
        } else {
2161
            /* must be readable segment */
2162
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2163
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2164

    
2165
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2166
                /* if not conforming code, test rights */
2167
                if (dpl < cpl || dpl < rpl)
2168
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169
            }
2170
        }
2171

    
2172
        if (!(e2 & DESC_P_MASK)) {
2173
            if (seg_reg == R_SS)
2174
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2175
            else
2176
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2177
        }
2178

    
2179
        /* set the access bit if not already set */
2180
        if (!(e2 & DESC_A_MASK)) {
2181
            e2 |= DESC_A_MASK;
2182
            stl_kernel(ptr + 4, e2);
2183
        }
2184

    
2185
        cpu_x86_load_seg_cache(env, seg_reg, selector,
2186
                       get_seg_base(e1, e2),
2187
                       get_seg_limit(e1, e2),
2188
                       e2);
2189
#if 0
2190
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2191
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
2192
#endif
2193
    }
2194
}
2195

    
2196
/* protected mode jump */
2197
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2198
                           int next_eip_addend)
2199
{
2200
    int gate_cs, type;
2201
    uint32_t e1, e2, cpl, dpl, rpl, limit;
2202
    target_ulong next_eip;
2203

    
2204
    if ((new_cs & 0xfffc) == 0)
2205
        raise_exception_err(EXCP0D_GPF, 0);
2206
    if (load_segment(&e1, &e2, new_cs) != 0)
2207
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208
    cpl = env->hflags & HF_CPL_MASK;
2209
    if (e2 & DESC_S_MASK) {
2210
        if (!(e2 & DESC_CS_MASK))
2211
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2213
        if (e2 & DESC_C_MASK) {
2214
            /* conforming code segment */
2215
            if (dpl > cpl)
2216
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217
        } else {
2218
            /* non conforming code segment */
2219
            rpl = new_cs & 3;
2220
            if (rpl > cpl)
2221
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2222
            if (dpl != cpl)
2223
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2224
        }
2225
        if (!(e2 & DESC_P_MASK))
2226
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2227
        limit = get_seg_limit(e1, e2);
2228
        if (new_eip > limit &&
2229
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2230
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2231
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2232
                       get_seg_base(e1, e2), limit, e2);
2233
        EIP = new_eip;
2234
    } else {
2235
        /* jump to call or task gate */
2236
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2237
        rpl = new_cs & 3;
2238
        cpl = env->hflags & HF_CPL_MASK;
2239
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2240
        switch(type) {
2241
        case 1: /* 286 TSS */
2242
        case 9: /* 386 TSS */
2243
        case 5: /* task gate */
2244
            if (dpl < cpl || dpl < rpl)
2245
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246
            next_eip = env->eip + next_eip_addend;
2247
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2248
            CC_OP = CC_OP_EFLAGS;
2249
            break;
2250
        case 4: /* 286 call gate */
2251
        case 12: /* 386 call gate */
2252
            if ((dpl < cpl) || (dpl < rpl))
2253
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2254
            if (!(e2 & DESC_P_MASK))
2255
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2256
            gate_cs = e1 >> 16;
2257
            new_eip = (e1 & 0xffff);
2258
            if (type == 12)
2259
                new_eip |= (e2 & 0xffff0000);
2260
            if (load_segment(&e1, &e2, gate_cs) != 0)
2261
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2262
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2263
            /* must be code segment */
2264
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2265
                 (DESC_S_MASK | DESC_CS_MASK)))
2266
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2267
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2268
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2269
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2270
            if (!(e2 & DESC_P_MASK))
2271
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2272
            limit = get_seg_limit(e1, e2);
2273
            if (new_eip > limit)
2274
                raise_exception_err(EXCP0D_GPF, 0);
2275
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2276
                                   get_seg_base(e1, e2), limit, e2);
2277
            EIP = new_eip;
2278
            break;
2279
        default:
2280
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2281
            break;
2282
        }
2283
    }
2284
}
2285

    
2286
/* real mode call */
2287
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2288
                       int shift, int next_eip)
2289
{
2290
    int new_eip;
2291
    uint32_t esp, esp_mask;
2292
    target_ulong ssp;
2293

    
2294
    new_eip = new_eip1;
2295
    esp = ESP;
2296
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2297
    ssp = env->segs[R_SS].base;
2298
    if (shift) {
2299
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2300
        PUSHL(ssp, esp, esp_mask, next_eip);
2301
    } else {
2302
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2303
        PUSHW(ssp, esp, esp_mask, next_eip);
2304
    }
2305

    
2306
    SET_ESP(esp, esp_mask);
2307
    env->eip = new_eip;
2308
    env->segs[R_CS].selector = new_cs;
2309
    env->segs[R_CS].base = (new_cs << 4);
2310
}
2311

    
2312
/* protected mode call */
2313
void helper_lcall_protected(int new_cs, target_ulong new_eip, 
2314
                            int shift, int next_eip_addend)
2315
{
2316
    int new_stack, i;
2317
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2318
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2319
    uint32_t val, limit, old_sp_mask;
2320
    target_ulong ssp, old_ssp, next_eip;
2321

    
2322
    next_eip = env->eip + next_eip_addend;
2323
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2324
    LOG_PCALL_STATE(env);
2325
    if ((new_cs & 0xfffc) == 0)
2326
        raise_exception_err(EXCP0D_GPF, 0);
2327
    if (load_segment(&e1, &e2, new_cs) != 0)
2328
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2329
    cpl = env->hflags & HF_CPL_MASK;
2330
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2331
    if (e2 & DESC_S_MASK) {
2332
        if (!(e2 & DESC_CS_MASK))
2333
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335
        if (e2 & DESC_C_MASK) {
2336
            /* conforming code segment */
2337
            if (dpl > cpl)
2338
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339
        } else {
2340
            /* non conforming code segment */
2341
            rpl = new_cs & 3;
2342
            if (rpl > cpl)
2343
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344
            if (dpl != cpl)
2345
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346
        }
2347
        if (!(e2 & DESC_P_MASK))
2348
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349

    
2350
#ifdef TARGET_X86_64
2351
        /* XXX: check 16/32 bit cases in long mode */
2352
        if (shift == 2) {
2353
            target_ulong rsp;
2354
            /* 64 bit case */
2355
            rsp = ESP;
2356
            PUSHQ(rsp, env->segs[R_CS].selector);
2357
            PUSHQ(rsp, next_eip);
2358
            /* from this point, not restartable */
2359
            ESP = rsp;
2360
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361
                                   get_seg_base(e1, e2),
2362
                                   get_seg_limit(e1, e2), e2);
2363
            EIP = new_eip;
2364
        } else
2365
#endif
2366
        {
2367
            sp = ESP;
2368
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369
            ssp = env->segs[R_SS].base;
2370
            if (shift) {
2371
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372
                PUSHL(ssp, sp, sp_mask, next_eip);
2373
            } else {
2374
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375
                PUSHW(ssp, sp, sp_mask, next_eip);
2376
            }
2377

    
2378
            limit = get_seg_limit(e1, e2);
2379
            if (new_eip > limit)
2380
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381
            /* from this point, not restartable */
2382
            SET_ESP(sp, sp_mask);
2383
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384
                                   get_seg_base(e1, e2), limit, e2);
2385
            EIP = new_eip;
2386
        }
2387
    } else {
2388
        /* check gate type */
2389
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391
        rpl = new_cs & 3;
2392
        switch(type) {
2393
        case 1: /* available 286 TSS */
2394
        case 9: /* available 386 TSS */
2395
        case 5: /* task gate */
2396
            if (dpl < cpl || dpl < rpl)
2397
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399
            CC_OP = CC_OP_EFLAGS;
2400
            return;
2401
        case 4: /* 286 call gate */
2402
        case 12: /* 386 call gate */
2403
            break;
2404
        default:
2405
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406
            break;
2407
        }
2408
        shift = type >> 3;
2409

    
2410
        if (dpl < cpl || dpl < rpl)
2411
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412
        /* check valid bit */
2413
        if (!(e2 & DESC_P_MASK))
2414
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
2415
        selector = e1 >> 16;
2416
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417
        param_count = e2 & 0x1f;
2418
        if ((selector & 0xfffc) == 0)
2419
            raise_exception_err(EXCP0D_GPF, 0);
2420

    
2421
        if (load_segment(&e1, &e2, selector) != 0)
2422
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426
        if (dpl > cpl)
2427
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428
        if (!(e2 & DESC_P_MASK))
2429
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430

    
2431
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432
            /* to inner privilege */
2433
            get_ss_esp_from_tss(&ss, &sp, dpl);
2434
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2435
                        ss, sp, param_count, ESP);
2436
            if ((ss & 0xfffc) == 0)
2437
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2438
            if ((ss & 3) != dpl)
2439
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2440
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2441
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2442
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2443
            if (ss_dpl != dpl)
2444
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445
            if (!(ss_e2 & DESC_S_MASK) ||
2446
                (ss_e2 & DESC_CS_MASK) ||
2447
                !(ss_e2 & DESC_W_MASK))
2448
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2449
            if (!(ss_e2 & DESC_P_MASK))
2450
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2451

    
2452
            //            push_size = ((param_count * 2) + 8) << shift;
2453

    
2454
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2455
            old_ssp = env->segs[R_SS].base;
2456

    
2457
            sp_mask = get_sp_mask(ss_e2);
2458
            ssp = get_seg_base(ss_e1, ss_e2);
2459
            if (shift) {
2460
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2461
                PUSHL(ssp, sp, sp_mask, ESP);
2462
                for(i = param_count - 1; i >= 0; i--) {
2463
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2464
                    PUSHL(ssp, sp, sp_mask, val);
2465
                }
2466
            } else {
2467
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468
                PUSHW(ssp, sp, sp_mask, ESP);
2469
                for(i = param_count - 1; i >= 0; i--) {
2470
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2471
                    PUSHW(ssp, sp, sp_mask, val);
2472
                }
2473
            }
2474
            new_stack = 1;
2475
        } else {
2476
            /* to same privilege */
2477
            sp = ESP;
2478
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2479
            ssp = env->segs[R_SS].base;
2480
            //            push_size = (4 << shift);
2481
            new_stack = 0;
2482
        }
2483

    
2484
        if (shift) {
2485
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2486
            PUSHL(ssp, sp, sp_mask, next_eip);
2487
        } else {
2488
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2489
            PUSHW(ssp, sp, sp_mask, next_eip);
2490
        }
2491

    
2492
        /* from this point, not restartable */
2493

    
2494
        if (new_stack) {
2495
            ss = (ss & ~3) | dpl;
2496
            cpu_x86_load_seg_cache(env, R_SS, ss,
2497
                                   ssp,
2498
                                   get_seg_limit(ss_e1, ss_e2),
2499
                                   ss_e2);
2500
        }
2501

    
2502
        selector = (selector & ~3) | dpl;
2503
        cpu_x86_load_seg_cache(env, R_CS, selector,
2504
                       get_seg_base(e1, e2),
2505
                       get_seg_limit(e1, e2),
2506
                       e2);
2507
        cpu_x86_set_cpl(env, dpl);
2508
        SET_ESP(sp, sp_mask);
2509
        EIP = offset;
2510
    }
2511
}
2512

    
2513
/* real and vm86 mode iret */
2514
void helper_iret_real(int shift)
2515
{
2516
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2517
    target_ulong ssp;
2518
    int eflags_mask;
2519

    
2520
    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2521
    sp = ESP;
2522
    ssp = env->segs[R_SS].base;
2523
    if (shift == 1) {
2524
        /* 32 bits */
2525
        POPL(ssp, sp, sp_mask, new_eip);
2526
        POPL(ssp, sp, sp_mask, new_cs);
2527
        new_cs &= 0xffff;
2528
        POPL(ssp, sp, sp_mask, new_eflags);
2529
    } else {
2530
        /* 16 bits */
2531
        POPW(ssp, sp, sp_mask, new_eip);
2532
        POPW(ssp, sp, sp_mask, new_cs);
2533
        POPW(ssp, sp, sp_mask, new_eflags);
2534
    }
2535
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2536
    env->segs[R_CS].selector = new_cs;
2537
    env->segs[R_CS].base = (new_cs << 4);
2538
    env->eip = new_eip;
2539
    if (env->eflags & VM_MASK)
2540
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2541
    else
2542
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2543
    if (shift == 0)
2544
        eflags_mask &= 0xffff;
2545
    load_eflags(new_eflags, eflags_mask);
2546
    env->hflags2 &= ~HF2_NMI_MASK;
2547
}
2548

    
2549
static inline void validate_seg(int seg_reg, int cpl)
2550
{
2551
    int dpl;
2552
    uint32_t e2;
2553

    
2554
    /* XXX: on x86_64, we do not want to nullify FS and GS because
2555
       they may still contain a valid base. I would be interested to
2556
       know how a real x86_64 CPU behaves */
2557
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
2558
        (env->segs[seg_reg].selector & 0xfffc) == 0)
2559
        return;
2560

    
2561
    e2 = env->segs[seg_reg].flags;
2562
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2563
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2564
        /* data or non conforming code segment */
2565
        if (dpl < cpl) {
2566
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2567
        }
2568
    }
2569
}
2570

    
2571
/* protected mode iret */
2572
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2573
{
2574
    uint32_t new_cs, new_eflags, new_ss;
2575
    uint32_t new_es, new_ds, new_fs, new_gs;
2576
    uint32_t e1, e2, ss_e1, ss_e2;
2577
    int cpl, dpl, rpl, eflags_mask, iopl;
2578
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2579

    
2580
#ifdef TARGET_X86_64
2581
    if (shift == 2)
2582
        sp_mask = -1;
2583
    else
2584
#endif
2585
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2586
    sp = ESP;
2587
    ssp = env->segs[R_SS].base;
2588
    new_eflags = 0; /* avoid warning */
2589
#ifdef TARGET_X86_64
2590
    if (shift == 2) {
2591
        POPQ(sp, new_eip);
2592
        POPQ(sp, new_cs);
2593
        new_cs &= 0xffff;
2594
        if (is_iret) {
2595
            POPQ(sp, new_eflags);
2596
        }
2597
    } else
2598
#endif
2599
    if (shift == 1) {
2600
        /* 32 bits */
2601
        POPL(ssp, sp, sp_mask, new_eip);
2602
        POPL(ssp, sp, sp_mask, new_cs);
2603
        new_cs &= 0xffff;
2604
        if (is_iret) {
2605
            POPL(ssp, sp, sp_mask, new_eflags);
2606
            if (new_eflags & VM_MASK)
2607
                goto return_to_vm86;
2608
        }
2609
    } else {
2610
        /* 16 bits */
2611
        POPW(ssp, sp, sp_mask, new_eip);
2612
        POPW(ssp, sp, sp_mask, new_cs);
2613
        if (is_iret)
2614
            POPW(ssp, sp, sp_mask, new_eflags);
2615
    }
2616
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2617
              new_cs, new_eip, shift, addend);
2618
    LOG_PCALL_STATE(env);
2619
    if ((new_cs & 0xfffc) == 0)
2620
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2621
    if (load_segment(&e1, &e2, new_cs) != 0)
2622
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2623
    if (!(e2 & DESC_S_MASK) ||
2624
        !(e2 & DESC_CS_MASK))
2625
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626
    cpl = env->hflags & HF_CPL_MASK;
2627
    rpl = new_cs & 3;
2628
    if (rpl < cpl)
2629
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2630
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2631
    if (e2 & DESC_C_MASK) {
2632
        if (dpl > rpl)
2633
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2634
    } else {
2635
        if (dpl != rpl)
2636
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2637
    }
2638
    if (!(e2 & DESC_P_MASK))
2639
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2640

    
2641
    sp += addend;
2642
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2643
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2644
        /* return to same privilege level */
2645
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2646
                       get_seg_base(e1, e2),
2647
                       get_seg_limit(e1, e2),
2648
                       e2);
2649
    } else {
2650
        /* return to different privilege level */
2651
#ifdef TARGET_X86_64
2652
        if (shift == 2) {
2653
            POPQ(sp, new_esp);
2654
            POPQ(sp, new_ss);
2655
            new_ss &= 0xffff;
2656
        } else
2657
#endif
2658
        if (shift == 1) {
2659
            /* 32 bits */
2660
            POPL(ssp, sp, sp_mask, new_esp);
2661
            POPL(ssp, sp, sp_mask, new_ss);
2662
            new_ss &= 0xffff;
2663
        } else {
2664
            /* 16 bits */
2665
            POPW(ssp, sp, sp_mask, new_esp);
2666
            POPW(ssp, sp, sp_mask, new_ss);
2667
        }
2668
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2669
                    new_ss, new_esp);
2670
        if ((new_ss & 0xfffc) == 0) {
2671
#ifdef TARGET_X86_64
2672
            /* NULL ss is allowed in long mode if cpl != 3*/
2673
            /* XXX: test CS64 ? */
2674
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2675
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2676
                                       0, 0xffffffff,
2677
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2678
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2679
                                       DESC_W_MASK | DESC_A_MASK);
2680
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2681
            } else
2682
#endif
2683
            {
2684
                raise_exception_err(EXCP0D_GPF, 0);
2685
            }
2686
        } else {
2687
            if ((new_ss & 3) != rpl)
2688
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2689
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2690
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2691
            if (!(ss_e2 & DESC_S_MASK) ||
2692
                (ss_e2 & DESC_CS_MASK) ||
2693
                !(ss_e2 & DESC_W_MASK))
2694
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2695
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2696
            if (dpl != rpl)
2697
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2698
            if (!(ss_e2 & DESC_P_MASK))
2699
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2700
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2701
                                   get_seg_base(ss_e1, ss_e2),
2702
                                   get_seg_limit(ss_e1, ss_e2),
2703
                                   ss_e2);
2704
        }
2705

    
2706
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2707
                       get_seg_base(e1, e2),
2708
                       get_seg_limit(e1, e2),
2709
                       e2);
2710
        cpu_x86_set_cpl(env, rpl);
2711
        sp = new_esp;
2712
#ifdef TARGET_X86_64
2713
        if (env->hflags & HF_CS64_MASK)
2714
            sp_mask = -1;
2715
        else
2716
#endif
2717
            sp_mask = get_sp_mask(ss_e2);
2718

    
2719
        /* validate data segments */
2720
        validate_seg(R_ES, rpl);
2721
        validate_seg(R_DS, rpl);
2722
        validate_seg(R_FS, rpl);
2723
        validate_seg(R_GS, rpl);
2724

    
2725
        sp += addend;
2726
    }
2727
    SET_ESP(sp, sp_mask);
2728
    env->eip = new_eip;
2729
    if (is_iret) {
2730
        /* NOTE: 'cpl' is the _old_ CPL */
2731
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2732
        if (cpl == 0)
2733
            eflags_mask |= IOPL_MASK;
2734
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2735
        if (cpl <= iopl)
2736
            eflags_mask |= IF_MASK;
2737
        if (shift == 0)
2738
            eflags_mask &= 0xffff;
2739
        load_eflags(new_eflags, eflags_mask);
2740
    }
2741
    return;
2742

    
2743
 return_to_vm86:
2744
    POPL(ssp, sp, sp_mask, new_esp);
2745
    POPL(ssp, sp, sp_mask, new_ss);
2746
    POPL(ssp, sp, sp_mask, new_es);
2747
    POPL(ssp, sp, sp_mask, new_ds);
2748
    POPL(ssp, sp, sp_mask, new_fs);
2749
    POPL(ssp, sp, sp_mask, new_gs);
2750

    
2751
    /* modify processor state */
2752
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2753
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2754
    load_seg_vm(R_CS, new_cs & 0xffff);
2755
    cpu_x86_set_cpl(env, 3);
2756
    load_seg_vm(R_SS, new_ss & 0xffff);
2757
    load_seg_vm(R_ES, new_es & 0xffff);
2758
    load_seg_vm(R_DS, new_ds & 0xffff);
2759
    load_seg_vm(R_FS, new_fs & 0xffff);
2760
    load_seg_vm(R_GS, new_gs & 0xffff);
2761

    
2762
    env->eip = new_eip & 0xffff;
2763
    ESP = new_esp;
2764
}
2765

    
2766
void helper_iret_protected(int shift, int next_eip)
2767
{
2768
    int tss_selector, type;
2769
    uint32_t e1, e2;
2770

    
2771
    /* specific case for TSS */
2772
    if (env->eflags & NT_MASK) {
2773
#ifdef TARGET_X86_64
2774
        if (env->hflags & HF_LMA_MASK)
2775
            raise_exception_err(EXCP0D_GPF, 0);
2776
#endif
2777
        tss_selector = lduw_kernel(env->tr.base + 0);
2778
        if (tss_selector & 4)
2779
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2780
        if (load_segment(&e1, &e2, tss_selector) != 0)
2781
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2782
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2783
        /* NOTE: we check both segment and busy TSS */
2784
        if (type != 3)
2785
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2786
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2787
    } else {
2788
        helper_ret_protected(shift, 1, 0);
2789
    }
2790
    env->hflags2 &= ~HF2_NMI_MASK;
2791
}
2792

    
2793
void helper_lret_protected(int shift, int addend)
2794
{
2795
    helper_ret_protected(shift, 0, addend);
2796
}
2797

    
2798
void helper_sysenter(void)
2799
{
2800
    if (env->sysenter_cs == 0) {
2801
        raise_exception_err(EXCP0D_GPF, 0);
2802
    }
2803
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2804
    cpu_x86_set_cpl(env, 0);
2805

    
2806
#ifdef TARGET_X86_64
2807
    if (env->hflags & HF_LMA_MASK) {
2808
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2809
                               0, 0xffffffff,
2810
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2811
                               DESC_S_MASK |
2812
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2813
    } else
2814
#endif
2815
    {
2816
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2817
                               0, 0xffffffff,
2818
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2819
                               DESC_S_MASK |
2820
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2821
    }
2822
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2823
                           0, 0xffffffff,
2824
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2825
                           DESC_S_MASK |
2826
                           DESC_W_MASK | DESC_A_MASK);
2827
    ESP = env->sysenter_esp;
2828
    EIP = env->sysenter_eip;
2829
}
2830

    
2831
void helper_sysexit(int dflag)
2832
{
2833
    int cpl;
2834

    
2835
    cpl = env->hflags & HF_CPL_MASK;
2836
    if (env->sysenter_cs == 0 || cpl != 0) {
2837
        raise_exception_err(EXCP0D_GPF, 0);
2838
    }
2839
    cpu_x86_set_cpl(env, 3);
2840
#ifdef TARGET_X86_64
2841
    if (dflag == 2) {
2842
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2843
                               0, 0xffffffff,
2844
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2845
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2846
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2847
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2848
                               0, 0xffffffff,
2849
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2850
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2851
                               DESC_W_MASK | DESC_A_MASK);
2852
    } else
2853
#endif
2854
    {
2855
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2856
                               0, 0xffffffff,
2857
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2858
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2859
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2860
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2861
                               0, 0xffffffff,
2862
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2863
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2864
                               DESC_W_MASK | DESC_A_MASK);
2865
    }
2866
    ESP = ECX;
2867
    EIP = EDX;
2868
}
2869

    
2870
#if defined(CONFIG_USER_ONLY)
2871
target_ulong helper_read_crN(int reg)
2872
{
2873
    return 0;
2874
}
2875

    
2876
void helper_write_crN(int reg, target_ulong t0)
2877
{
2878
}
2879

    
2880
void helper_movl_drN_T0(int reg, target_ulong t0)
2881
{
2882
}
2883
#else
2884
target_ulong helper_read_crN(int reg)
2885
{
2886
    target_ulong val;
2887

    
2888
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2889
    switch(reg) {
2890
    default:
2891
        val = env->cr[reg];
2892
        break;
2893
    case 8:
2894
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2895
            val = cpu_get_apic_tpr(env->apic_state);
2896
        } else {
2897
            val = env->v_tpr;
2898
        }
2899
        break;
2900
    }
2901
    return val;
2902
}
2903

    
2904
void helper_write_crN(int reg, target_ulong t0)
2905
{
2906
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2907
    switch(reg) {
2908
    case 0:
2909
        cpu_x86_update_cr0(env, t0);
2910
        break;
2911
    case 3:
2912
        cpu_x86_update_cr3(env, t0);
2913
        break;
2914
    case 4:
2915
        cpu_x86_update_cr4(env, t0);
2916
        break;
2917
    case 8:
2918
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2919
            cpu_set_apic_tpr(env->apic_state, t0);
2920
        }
2921
        env->v_tpr = t0 & 0x0f;
2922
        break;
2923
    default:
2924
        env->cr[reg] = t0;
2925
        break;
2926
    }
2927
}
2928

    
2929
void helper_movl_drN_T0(int reg, target_ulong t0)
2930
{
2931
    int i;
2932

    
2933
    if (reg < 4) {
2934
        hw_breakpoint_remove(env, reg);
2935
        env->dr[reg] = t0;
2936
        hw_breakpoint_insert(env, reg);
2937
    } else if (reg == 7) {
2938
        for (i = 0; i < 4; i++)
2939
            hw_breakpoint_remove(env, i);
2940
        env->dr[7] = t0;
2941
        for (i = 0; i < 4; i++)
2942
            hw_breakpoint_insert(env, i);
2943
    } else
2944
        env->dr[reg] = t0;
2945
}
2946
#endif
2947

    
2948
void helper_lmsw(target_ulong t0)
2949
{
2950
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2951
       if already set to one. */
2952
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2953
    helper_write_crN(0, t0);
2954
}
2955

    
2956
void helper_clts(void)
2957
{
2958
    env->cr[0] &= ~CR0_TS_MASK;
2959
    env->hflags &= ~HF_TS_MASK;
2960
}
2961

    
2962
void helper_invlpg(target_ulong addr)
2963
{
2964
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2965
    tlb_flush_page(env, addr);
2966
}
2967

    
2968
void helper_rdtsc(void)
2969
{
2970
    uint64_t val;
2971

    
2972
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2973
        raise_exception(EXCP0D_GPF);
2974
    }
2975
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2976

    
2977
    val = cpu_get_tsc(env) + env->tsc_offset;
2978
    EAX = (uint32_t)(val);
2979
    EDX = (uint32_t)(val >> 32);
2980
}
2981

    
2982
void helper_rdtscp(void)
2983
{
2984
    helper_rdtsc();
2985
    ECX = (uint32_t)(env->tsc_aux);
2986
}
2987

    
2988
void helper_rdpmc(void)
2989
{
2990
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2991
        raise_exception(EXCP0D_GPF);
2992
    }
2993
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2994
    
2995
    /* currently unimplemented */
2996
    raise_exception_err(EXCP06_ILLOP, 0);
2997
}
2998

    
2999
#if defined(CONFIG_USER_ONLY)
3000
void helper_wrmsr(void)
3001
{
3002
}
3003

    
3004
void helper_rdmsr(void)
3005
{
3006
}
3007
#else
3008
void helper_wrmsr(void)
3009
{
3010
    uint64_t val;
3011

    
3012
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3013

    
3014
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3015

    
3016
    switch((uint32_t)ECX) {
3017
    case MSR_IA32_SYSENTER_CS:
3018
        env->sysenter_cs = val & 0xffff;
3019
        break;
3020
    case MSR_IA32_SYSENTER_ESP:
3021
        env->sysenter_esp = val;
3022
        break;
3023
    case MSR_IA32_SYSENTER_EIP:
3024
        env->sysenter_eip = val;
3025
        break;
3026
    case MSR_IA32_APICBASE:
3027
        cpu_set_apic_base(env->apic_state, val);
3028
        break;
3029
    case MSR_EFER:
3030
        {
3031
            uint64_t update_mask;
3032
            update_mask = 0;
3033
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3034
                update_mask |= MSR_EFER_SCE;
3035
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3036
                update_mask |= MSR_EFER_LME;
3037
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3038
                update_mask |= MSR_EFER_FFXSR;
3039
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3040
                update_mask |= MSR_EFER_NXE;
3041
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3042
                update_mask |= MSR_EFER_SVME;
3043
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3044
                update_mask |= MSR_EFER_FFXSR;
3045
            cpu_load_efer(env, (env->efer & ~update_mask) |
3046
                          (val & update_mask));
3047
        }
3048
        break;
3049
    case MSR_STAR:
3050
        env->star = val;
3051
        break;
3052
    case MSR_PAT:
3053
        env->pat = val;
3054
        break;
3055
    case MSR_VM_HSAVE_PA:
3056
        env->vm_hsave = val;
3057
        break;
3058
#ifdef TARGET_X86_64
3059
    case MSR_LSTAR:
3060
        env->lstar = val;
3061
        break;
3062
    case MSR_CSTAR:
3063
        env->cstar = val;
3064
        break;
3065
    case MSR_FMASK:
3066
        env->fmask = val;
3067
        break;
3068
    case MSR_FSBASE:
3069
        env->segs[R_FS].base = val;
3070
        break;
3071
    case MSR_GSBASE:
3072
        env->segs[R_GS].base = val;
3073
        break;
3074
    case MSR_KERNELGSBASE:
3075
        env->kernelgsbase = val;
3076
        break;
3077
#endif
3078
    case MSR_MTRRphysBase(0):
3079
    case MSR_MTRRphysBase(1):
3080
    case MSR_MTRRphysBase(2):
3081
    case MSR_MTRRphysBase(3):
3082
    case MSR_MTRRphysBase(4):
3083
    case MSR_MTRRphysBase(5):
3084
    case MSR_MTRRphysBase(6):
3085
    case MSR_MTRRphysBase(7):
3086
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3087
        break;
3088
    case MSR_MTRRphysMask(0):
3089
    case MSR_MTRRphysMask(1):
3090
    case MSR_MTRRphysMask(2):
3091
    case MSR_MTRRphysMask(3):
3092
    case MSR_MTRRphysMask(4):
3093
    case MSR_MTRRphysMask(5):
3094
    case MSR_MTRRphysMask(6):
3095
    case MSR_MTRRphysMask(7):
3096
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3097
        break;
3098
    case MSR_MTRRfix64K_00000:
3099
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3100
        break;
3101
    case MSR_MTRRfix16K_80000:
3102
    case MSR_MTRRfix16K_A0000:
3103
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3104
        break;
3105
    case MSR_MTRRfix4K_C0000:
3106
    case MSR_MTRRfix4K_C8000:
3107
    case MSR_MTRRfix4K_D0000:
3108
    case MSR_MTRRfix4K_D8000:
3109
    case MSR_MTRRfix4K_E0000:
3110
    case MSR_MTRRfix4K_E8000:
3111
    case MSR_MTRRfix4K_F0000:
3112
    case MSR_MTRRfix4K_F8000:
3113
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3114
        break;
3115
    case MSR_MTRRdefType:
3116
        env->mtrr_deftype = val;
3117
        break;
3118
    case MSR_MCG_STATUS:
3119
        env->mcg_status = val;
3120
        break;
3121
    case MSR_MCG_CTL:
3122
        if ((env->mcg_cap & MCG_CTL_P)
3123
            && (val == 0 || val == ~(uint64_t)0))
3124
            env->mcg_ctl = val;
3125
        break;
3126
    case MSR_TSC_AUX:
3127
        env->tsc_aux = val;
3128
        break;
3129
    default:
3130
        if ((uint32_t)ECX >= MSR_MC0_CTL
3131
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3132
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3133
            if ((offset & 0x3) != 0
3134
                || (val == 0 || val == ~(uint64_t)0))
3135
                env->mce_banks[offset] = val;
3136
            break;
3137
        }
3138
        /* XXX: exception ? */
3139
        break;
3140
    }
3141
}
3142

    
3143
void helper_rdmsr(void)
3144
{
3145
    uint64_t val;
3146

    
3147
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3148

    
3149
    switch((uint32_t)ECX) {
3150
    case MSR_IA32_SYSENTER_CS:
3151
        val = env->sysenter_cs;
3152
        break;
3153
    case MSR_IA32_SYSENTER_ESP:
3154
        val = env->sysenter_esp;
3155
        break;
3156
    case MSR_IA32_SYSENTER_EIP:
3157
        val = env->sysenter_eip;
3158
        break;
3159
    case MSR_IA32_APICBASE:
3160
        val = cpu_get_apic_base(env->apic_state);
3161
        break;
3162
    case MSR_EFER:
3163
        val = env->efer;
3164
        break;
3165
    case MSR_STAR:
3166
        val = env->star;
3167
        break;
3168
    case MSR_PAT:
3169
        val = env->pat;
3170
        break;
3171
    case MSR_VM_HSAVE_PA:
3172
        val = env->vm_hsave;
3173
        break;
3174
    case MSR_IA32_PERF_STATUS:
3175
        /* tsc_increment_by_tick */
3176
        val = 1000ULL;
3177
        /* CPU multiplier */
3178
        val |= (((uint64_t)4ULL) << 40);
3179
        break;
3180
#ifdef TARGET_X86_64
3181
    case MSR_LSTAR:
3182
        val = env->lstar;
3183
        break;
3184
    case MSR_CSTAR:
3185
        val = env->cstar;
3186
        break;
3187
    case MSR_FMASK:
3188
        val = env->fmask;
3189
        break;
3190
    case MSR_FSBASE:
3191
        val = env->segs[R_FS].base;
3192
        break;
3193
    case MSR_GSBASE:
3194
        val = env->segs[R_GS].base;
3195
        break;
3196
    case MSR_KERNELGSBASE:
3197
        val = env->kernelgsbase;
3198
        break;
3199
    case MSR_TSC_AUX:
3200
        val = env->tsc_aux;
3201
        break;
3202
#endif
3203
    case MSR_MTRRphysBase(0):
3204
    case MSR_MTRRphysBase(1):
3205
    case MSR_MTRRphysBase(2):
3206
    case MSR_MTRRphysBase(3):
3207
    case MSR_MTRRphysBase(4):
3208
    case MSR_MTRRphysBase(5):
3209
    case MSR_MTRRphysBase(6):
3210
    case MSR_MTRRphysBase(7):
3211
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3212
        break;
3213
    case MSR_MTRRphysMask(0):
3214
    case MSR_MTRRphysMask(1):
3215
    case MSR_MTRRphysMask(2):
3216
    case MSR_MTRRphysMask(3):
3217
    case MSR_MTRRphysMask(4):
3218
    case MSR_MTRRphysMask(5):
3219
    case MSR_MTRRphysMask(6):
3220
    case MSR_MTRRphysMask(7):
3221
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3222
        break;
3223
    case MSR_MTRRfix64K_00000:
3224
        val = env->mtrr_fixed[0];
3225
        break;
3226
    case MSR_MTRRfix16K_80000:
3227
    case MSR_MTRRfix16K_A0000:
3228
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3229
        break;
3230
    case MSR_MTRRfix4K_C0000:
3231
    case MSR_MTRRfix4K_C8000:
3232
    case MSR_MTRRfix4K_D0000:
3233
    case MSR_MTRRfix4K_D8000:
3234
    case MSR_MTRRfix4K_E0000:
3235
    case MSR_MTRRfix4K_E8000:
3236
    case MSR_MTRRfix4K_F0000:
3237
    case MSR_MTRRfix4K_F8000:
3238
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3239
        break;
3240
    case MSR_MTRRdefType:
3241
        val = env->mtrr_deftype;
3242
        break;
3243
    case MSR_MTRRcap:
3244
        if (env->cpuid_features & CPUID_MTRR)
3245
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3246
        else
3247
            /* XXX: exception ? */
3248
            val = 0;
3249
        break;
3250
    case MSR_MCG_CAP:
3251
        val = env->mcg_cap;
3252
        break;
3253
    case MSR_MCG_CTL:
3254
        if (env->mcg_cap & MCG_CTL_P)
3255
            val = env->mcg_ctl;
3256
        else
3257
            val = 0;
3258
        break;
3259
    case MSR_MCG_STATUS:
3260
        val = env->mcg_status;
3261
        break;
3262
    default:
3263
        if ((uint32_t)ECX >= MSR_MC0_CTL
3264
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3265
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3266
            val = env->mce_banks[offset];
3267
            break;
3268
        }
3269
        /* XXX: exception ? */
3270
        val = 0;
3271
        break;
3272
    }
3273
    EAX = (uint32_t)(val);
3274
    EDX = (uint32_t)(val >> 32);
3275
}
3276
#endif
3277

    
3278
target_ulong helper_lsl(target_ulong selector1)
3279
{
3280
    unsigned int limit;
3281
    uint32_t e1, e2, eflags, selector;
3282
    int rpl, dpl, cpl, type;
3283

    
3284
    selector = selector1 & 0xffff;
3285
    eflags = helper_cc_compute_all(CC_OP);
3286
    if ((selector & 0xfffc) == 0)
3287
        goto fail;
3288
    if (load_segment(&e1, &e2, selector) != 0)
3289
        goto fail;
3290
    rpl = selector & 3;
3291
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3292
    cpl = env->hflags & HF_CPL_MASK;
3293
    if (e2 & DESC_S_MASK) {
3294
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3295
            /* conforming */
3296
        } else {
3297
            if (dpl < cpl || dpl < rpl)
3298
                goto fail;
3299
        }
3300
    } else {
3301
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3302
        switch(type) {
3303
        case 1:
3304
        case 2:
3305
        case 3:
3306
        case 9:
3307
        case 11:
3308
            break;
3309
        default:
3310
            goto fail;
3311
        }
3312
        if (dpl < cpl || dpl < rpl) {
3313
        fail:
3314
            CC_SRC = eflags & ~CC_Z;
3315
            return 0;
3316
        }
3317
    }
3318
    limit = get_seg_limit(e1, e2);
3319
    CC_SRC = eflags | CC_Z;
3320
    return limit;
3321
}
3322

    
3323
target_ulong helper_lar(target_ulong selector1)
3324
{
3325
    uint32_t e1, e2, eflags, selector;
3326
    int rpl, dpl, cpl, type;
3327

    
3328
    selector = selector1 & 0xffff;
3329
    eflags = helper_cc_compute_all(CC_OP);
3330
    if ((selector & 0xfffc) == 0)
3331
        goto fail;
3332
    if (load_segment(&e1, &e2, selector) != 0)
3333
        goto fail;
3334
    rpl = selector & 3;
3335
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3336
    cpl = env->hflags & HF_CPL_MASK;
3337
    if (e2 & DESC_S_MASK) {
3338
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3339
            /* conforming */
3340
        } else {
3341
            if (dpl < cpl || dpl < rpl)
3342
                goto fail;
3343
        }
3344
    } else {
3345
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3346
        switch(type) {
3347
        case 1:
3348
        case 2:
3349
        case 3:
3350
        case 4:
3351
        case 5:
3352
        case 9:
3353
        case 11:
3354
        case 12:
3355
            break;
3356
        default:
3357
            goto fail;
3358
        }
3359
        if (dpl < cpl || dpl < rpl) {
3360
        fail:
3361
            CC_SRC = eflags & ~CC_Z;
3362
            return 0;
3363
        }
3364
    }
3365
    CC_SRC = eflags | CC_Z;
3366
    return e2 & 0x00f0ff00;
3367
}
3368

    
3369
void helper_verr(target_ulong selector1)
3370
{
3371
    uint32_t e1, e2, eflags, selector;
3372
    int rpl, dpl, cpl;
3373

    
3374
    selector = selector1 & 0xffff;
3375
    eflags = helper_cc_compute_all(CC_OP);
3376
    if ((selector & 0xfffc) == 0)
3377
        goto fail;
3378
    if (load_segment(&e1, &e2, selector) != 0)
3379
        goto fail;
3380
    if (!(e2 & DESC_S_MASK))
3381
        goto fail;
3382
    rpl = selector & 3;
3383
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3384
    cpl = env->hflags & HF_CPL_MASK;
3385
    if (e2 & DESC_CS_MASK) {
3386
        if (!(e2 & DESC_R_MASK))
3387
            goto fail;
3388
        if (!(e2 & DESC_C_MASK)) {
3389
            if (dpl < cpl || dpl < rpl)
3390
                goto fail;
3391
        }
3392
    } else {
3393
        if (dpl < cpl || dpl < rpl) {
3394
        fail:
3395
            CC_SRC = eflags & ~CC_Z;
3396
            return;
3397
        }
3398
    }
3399
    CC_SRC = eflags | CC_Z;
3400
}
3401

    
3402
void helper_verw(target_ulong selector1)
3403
{
3404
    uint32_t e1, e2, eflags, selector;
3405
    int rpl, dpl, cpl;
3406

    
3407
    selector = selector1 & 0xffff;
3408
    eflags = helper_cc_compute_all(CC_OP);
3409
    if ((selector & 0xfffc) == 0)
3410
        goto fail;
3411
    if (load_segment(&e1, &e2, selector) != 0)
3412
        goto fail;
3413
    if (!(e2 & DESC_S_MASK))
3414
        goto fail;
3415
    rpl = selector & 3;
3416
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3417
    cpl = env->hflags & HF_CPL_MASK;
3418
    if (e2 & DESC_CS_MASK) {
3419
        goto fail;
3420
    } else {
3421
        if (dpl < cpl || dpl < rpl)
3422
            goto fail;
3423
        if (!(e2 & DESC_W_MASK)) {
3424
        fail:
3425
            CC_SRC = eflags & ~CC_Z;
3426
            return;
3427
        }
3428
    }
3429
    CC_SRC = eflags | CC_Z;
3430
}
3431

    
3432
/* x87 FPU helpers */
3433

    
3434
static void fpu_set_exception(int mask)
3435
{
3436
    env->fpus |= mask;
3437
    if (env->fpus & (~env->fpuc & FPUC_EM))
3438
        env->fpus |= FPUS_SE | FPUS_B;
3439
}
3440

    
3441
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3442
{
3443
    if (b == 0.0)
3444
        fpu_set_exception(FPUS_ZE);
3445
    return a / b;
3446
}
3447

    
3448
static void fpu_raise_exception(void)
3449
{
3450
    if (env->cr[0] & CR0_NE_MASK) {
3451
        raise_exception(EXCP10_COPR);
3452
    }
3453
#if !defined(CONFIG_USER_ONLY)
3454
    else {
3455
        cpu_set_ferr(env);
3456
    }
3457
#endif
3458
}
3459

    
3460
void helper_flds_FT0(uint32_t val)
3461
{
3462
    union {
3463
        float32 f;
3464
        uint32_t i;
3465
    } u;
3466
    u.i = val;
3467
    FT0 = float32_to_floatx(u.f, &env->fp_status);
3468
}
3469

    
3470
void helper_fldl_FT0(uint64_t val)
3471
{
3472
    union {
3473
        float64 f;
3474
        uint64_t i;
3475
    } u;
3476
    u.i = val;
3477
    FT0 = float64_to_floatx(u.f, &env->fp_status);
3478
}
3479

    
3480
void helper_fildl_FT0(int32_t val)
3481
{
3482
    FT0 = int32_to_floatx(val, &env->fp_status);
3483
}
3484

    
3485
void helper_flds_ST0(uint32_t val)
3486
{
3487
    int new_fpstt;
3488
    union {
3489
        float32 f;
3490
        uint32_t i;
3491
    } u;
3492
    new_fpstt = (env->fpstt - 1) & 7;
3493
    u.i = val;
3494
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3495
    env->fpstt = new_fpstt;
3496
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3497
}
3498

    
3499
void helper_fldl_ST0(uint64_t val)
3500
{
3501
    int new_fpstt;
3502
    union {
3503
        float64 f;
3504
        uint64_t i;
3505
    } u;
3506
    new_fpstt = (env->fpstt - 1) & 7;
3507
    u.i = val;
3508
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3509
    env->fpstt = new_fpstt;
3510
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3511
}
3512

    
3513
void helper_fildl_ST0(int32_t val)
3514
{
3515
    int new_fpstt;
3516
    new_fpstt = (env->fpstt - 1) & 7;
3517
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3518
    env->fpstt = new_fpstt;
3519
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3520
}
3521

    
3522
void helper_fildll_ST0(int64_t val)
3523
{
3524
    int new_fpstt;
3525
    new_fpstt = (env->fpstt - 1) & 7;
3526
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3527
    env->fpstt = new_fpstt;
3528
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3529
}
3530

    
3531
uint32_t helper_fsts_ST0(void)
3532
{
3533
    union {
3534
        float32 f;
3535
        uint32_t i;
3536
    } u;
3537
    u.f = floatx_to_float32(ST0, &env->fp_status);
3538
    return u.i;
3539
}
3540

    
3541
uint64_t helper_fstl_ST0(void)
3542
{
3543
    union {
3544
        float64 f;
3545
        uint64_t i;
3546
    } u;
3547
    u.f = floatx_to_float64(ST0, &env->fp_status);
3548
    return u.i;
3549
}
3550

    
3551
int32_t helper_fist_ST0(void)
3552
{
3553
    int32_t val;
3554
    val = floatx_to_int32(ST0, &env->fp_status);
3555
    if (val != (int16_t)val)
3556
        val = -32768;
3557
    return val;
3558
}
3559

    
3560
int32_t helper_fistl_ST0(void)
3561
{
3562
    int32_t val;
3563
    val = floatx_to_int32(ST0, &env->fp_status);
3564
    return val;
3565
}
3566

    
3567
int64_t helper_fistll_ST0(void)
3568
{
3569
    int64_t val;
3570
    val = floatx_to_int64(ST0, &env->fp_status);
3571
    return val;
3572
}
3573

    
3574
int32_t helper_fistt_ST0(void)
3575
{
3576
    int32_t val;
3577
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3578
    if (val != (int16_t)val)
3579
        val = -32768;
3580
    return val;
3581
}
3582

    
3583
int32_t helper_fisttl_ST0(void)
3584
{
3585
    int32_t val;
3586
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3587
    return val;
3588
}
3589

    
3590
int64_t helper_fisttll_ST0(void)
3591
{
3592
    int64_t val;
3593
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3594
    return val;
3595
}
3596

    
3597
void helper_fldt_ST0(target_ulong ptr)
3598
{
3599
    int new_fpstt;
3600
    new_fpstt = (env->fpstt - 1) & 7;
3601
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
3602
    env->fpstt = new_fpstt;
3603
    env->fptags[new_fpstt] = 0; /* validate stack entry */
3604
}
3605

    
3606
void helper_fstt_ST0(target_ulong ptr)
3607
{
3608
    helper_fstt(ST0, ptr);
3609
}
3610

    
3611
void helper_fpush(void)
3612
{
3613
    fpush();
3614
}
3615

    
3616
void helper_fpop(void)
3617
{
3618
    fpop();
3619
}
3620

    
3621
void helper_fdecstp(void)
3622
{
3623
    env->fpstt = (env->fpstt - 1) & 7;
3624
    env->fpus &= (~0x4700);
3625
}
3626

    
3627
void helper_fincstp(void)
3628
{
3629
    env->fpstt = (env->fpstt + 1) & 7;
3630
    env->fpus &= (~0x4700);
3631
}
3632

    
3633
/* FPU move */
3634

    
3635
void helper_ffree_STN(int st_index)
3636
{
3637
    env->fptags[(env->fpstt + st_index) & 7] = 1;
3638
}
3639

    
3640
void helper_fmov_ST0_FT0(void)
3641
{
3642
    ST0 = FT0;
3643
}
3644

    
3645
void helper_fmov_FT0_STN(int st_index)
3646
{
3647
    FT0 = ST(st_index);
3648
}
3649

    
3650
void helper_fmov_ST0_STN(int st_index)
3651
{
3652
    ST0 = ST(st_index);
3653
}
3654

    
3655
void helper_fmov_STN_ST0(int st_index)
3656
{
3657
    ST(st_index) = ST0;
3658
}
3659

    
3660
void helper_fxchg_ST0_STN(int st_index)
3661
{
3662
    CPU86_LDouble tmp;
3663
    tmp = ST(st_index);
3664
    ST(st_index) = ST0;
3665
    ST0 = tmp;
3666
}
3667

    
3668
/* FPU operations */
3669

    
3670
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3671

    
3672
void helper_fcom_ST0_FT0(void)
3673
{
3674
    int ret;
3675

    
3676
    ret = floatx_compare(ST0, FT0, &env->fp_status);
3677
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3678
}
3679

    
3680
void helper_fucom_ST0_FT0(void)
3681
{
3682
    int ret;
3683

    
3684
    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3685
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3686
}
3687

    
3688
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3689

    
3690
void helper_fcomi_ST0_FT0(void)
3691
{
3692
    int eflags;
3693
    int ret;
3694

    
3695
    ret = floatx_compare(ST0, FT0, &env->fp_status);
3696
    eflags = helper_cc_compute_all(CC_OP);
3697
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3698
    CC_SRC = eflags;
3699
}
3700

    
3701
void helper_fucomi_ST0_FT0(void)
3702
{
3703
    int eflags;
3704
    int ret;
3705

    
3706
    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3707
    eflags = helper_cc_compute_all(CC_OP);
3708
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3709
    CC_SRC = eflags;
3710
}
3711

    
3712
void helper_fadd_ST0_FT0(void)
3713
{
3714
    ST0 += FT0;
3715
}
3716

    
3717
void helper_fmul_ST0_FT0(void)
3718
{
3719
    ST0 *= FT0;
3720
}
3721

    
3722
void helper_fsub_ST0_FT0(void)
3723
{
3724
    ST0 -= FT0;
3725
}
3726

    
3727
void helper_fsubr_ST0_FT0(void)
3728
{
3729
    ST0 = FT0 - ST0;
3730
}
3731

    
3732
void helper_fdiv_ST0_FT0(void)
3733
{
3734
    ST0 = helper_fdiv(ST0, FT0);
3735
}
3736

    
3737
void helper_fdivr_ST0_FT0(void)
3738
{
3739
    ST0 = helper_fdiv(FT0, ST0);
3740
}
3741

    
3742
/* fp operations between STN and ST0 */
3743

    
3744
void helper_fadd_STN_ST0(int st_index)
3745
{
3746
    ST(st_index) += ST0;
3747
}
3748

    
3749
void helper_fmul_STN_ST0(int st_index)
3750
{
3751
    ST(st_index) *= ST0;
3752
}
3753

    
3754
void helper_fsub_STN_ST0(int st_index)
3755
{
3756
    ST(st_index) -= ST0;
3757
}
3758

    
3759
void helper_fsubr_STN_ST0(int st_index)
3760
{
3761
    CPU86_LDouble *p;
3762
    p = &ST(st_index);
3763
    *p = ST0 - *p;
3764
}
3765

    
3766
void helper_fdiv_STN_ST0(int st_index)
3767
{
3768
    CPU86_LDouble *p;
3769
    p = &ST(st_index);
3770
    *p = helper_fdiv(*p, ST0);
3771
}
3772

    
3773
void helper_fdivr_STN_ST0(int st_index)
3774
{
3775
    CPU86_LDouble *p;
3776
    p = &ST(st_index);
3777
    *p = helper_fdiv(ST0, *p);
3778
}
3779

    
3780
/* misc FPU operations */
3781
void helper_fchs_ST0(void)
3782
{
3783
    ST0 = floatx_chs(ST0);
3784
}
3785

    
3786
void helper_fabs_ST0(void)
3787
{
3788
    ST0 = floatx_abs(ST0);
3789
}
3790

    
3791
void helper_fld1_ST0(void)
3792
{
3793
    ST0 = f15rk[1];
3794
}
3795

    
3796
void helper_fldl2t_ST0(void)
3797
{
3798
    ST0 = f15rk[6];
3799
}
3800

    
3801
void helper_fldl2e_ST0(void)
3802
{
3803
    ST0 = f15rk[5];
3804
}
3805

    
3806
void helper_fldpi_ST0(void)
3807
{
3808
    ST0 = f15rk[2];
3809
}
3810

    
3811
void helper_fldlg2_ST0(void)
3812
{
3813
    ST0 = f15rk[3];
3814
}
3815

    
3816
void helper_fldln2_ST0(void)
3817
{
3818
    ST0 = f15rk[4];
3819
}
3820

    
3821
void helper_fldz_ST0(void)
3822
{
3823
    ST0 = f15rk[0];
3824
}
3825

    
3826
void helper_fldz_FT0(void)
3827
{
3828
    FT0 = f15rk[0];
3829
}
3830

    
3831
uint32_t helper_fnstsw(void)
3832
{
3833
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3834
}
3835

    
3836
uint32_t helper_fnstcw(void)
3837
{
3838
    return env->fpuc;
3839
}
3840

    
3841
static void update_fp_status(void)
3842
{
3843
    int rnd_type;
3844

    
3845
    /* set rounding mode */
3846
    switch(env->fpuc & RC_MASK) {
3847
    default:
3848
    case RC_NEAR:
3849
        rnd_type = float_round_nearest_even;
3850
        break;
3851
    case RC_DOWN:
3852
        rnd_type = float_round_down;
3853
        break;
3854
    case RC_UP:
3855
        rnd_type = float_round_up;
3856
        break;
3857
    case RC_CHOP:
3858
        rnd_type = float_round_to_zero;
3859
        break;
3860
    }
3861
    set_float_rounding_mode(rnd_type, &env->fp_status);
3862
#ifdef FLOATX80
3863
    switch((env->fpuc >> 8) & 3) {
3864
    case 0:
3865
        rnd_type = 32;
3866
        break;
3867
    case 2:
3868
        rnd_type = 64;
3869
        break;
3870
    case 3:
3871
    default:
3872
        rnd_type = 80;
3873
        break;
3874
    }
3875
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3876
#endif
3877
}
3878

    
3879
void helper_fldcw(uint32_t val)
3880
{
3881
    env->fpuc = val;
3882
    update_fp_status();
3883
}
3884

    
3885
void helper_fclex(void)
3886
{
3887
    env->fpus &= 0x7f00;
3888
}
3889

    
3890
void helper_fwait(void)
3891
{
3892
    if (env->fpus & FPUS_SE)
3893
        fpu_raise_exception();
3894
}
3895

    
3896
void helper_fninit(void)
3897
{
3898
    env->fpus = 0;
3899
    env->fpstt = 0;
3900
    env->fpuc = 0x37f;
3901
    env->fptags[0] = 1;
3902
    env->fptags[1] = 1;
3903
    env->fptags[2] = 1;
3904
    env->fptags[3] = 1;
3905
    env->fptags[4] = 1;
3906
    env->fptags[5] = 1;
3907
    env->fptags[6] = 1;
3908
    env->fptags[7] = 1;
3909
}
3910

    
3911
/* BCD ops */
3912

    
3913
void helper_fbld_ST0(target_ulong ptr)
3914
{
3915
    CPU86_LDouble tmp;
3916
    uint64_t val;
3917
    unsigned int v;
3918
    int i;
3919

    
3920
    val = 0;
3921
    for(i = 8; i >= 0; i--) {
3922
        v = ldub(ptr + i);
3923
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3924
    }
3925
    tmp = val;
3926
    if (ldub(ptr + 9) & 0x80)
3927
        tmp = -tmp;
3928
    fpush();
3929
    ST0 = tmp;
3930
}
3931

    
3932
void helper_fbst_ST0(target_ulong ptr)
3933
{
3934
    int v;
3935
    target_ulong mem_ref, mem_end;
3936
    int64_t val;
3937

    
3938
    val = floatx_to_int64(ST0, &env->fp_status);
3939
    mem_ref = ptr;
3940
    mem_end = mem_ref + 9;
3941
    if (val < 0) {
3942
        stb(mem_end, 0x80);
3943
        val = -val;
3944
    } else {
3945
        stb(mem_end, 0x00);
3946
    }
3947
    while (mem_ref < mem_end) {
3948
        if (val == 0)
3949
            break;
3950
        v = val % 100;
3951
        val = val / 100;
3952
        v = ((v / 10) << 4) | (v % 10);
3953
        stb(mem_ref++, v);
3954
    }
3955
    while (mem_ref < mem_end) {
3956
        stb(mem_ref++, 0);
3957
    }
3958
}
3959

    
3960
void helper_f2xm1(void)
3961
{
3962
    ST0 = pow(2.0,ST0) - 1.0;
3963
}
3964

    
3965
void helper_fyl2x(void)
3966
{
3967
    CPU86_LDouble fptemp;
3968

    
3969
    fptemp = ST0;
3970
    if (fptemp>0.0){
3971
        fptemp = log(fptemp)/log(2.0);         /* log2(ST) */
3972
        ST1 *= fptemp;
3973
        fpop();
3974
    } else {
3975
        env->fpus &= (~0x4700);
3976
        env->fpus |= 0x400;
3977
    }
3978
}
3979

    
3980
void helper_fptan(void)
3981
{
3982
    CPU86_LDouble fptemp;
3983

    
3984
    fptemp = ST0;
3985
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3986
        env->fpus |= 0x400;
3987
    } else {
3988
        ST0 = tan(fptemp);
3989
        fpush();
3990
        ST0 = 1.0;
3991
        env->fpus &= (~0x400);  /* C2 <-- 0 */
3992
        /* the above code is for  |arg| < 2**52 only */
3993
    }
3994
}
3995

    
3996
void helper_fpatan(void)
3997
{
3998
    CPU86_LDouble fptemp, fpsrcop;
3999

    
4000
    fpsrcop = ST1;
4001
    fptemp = ST0;
4002
    ST1 = atan2(fpsrcop,fptemp);
4003
    fpop();
4004
}
4005

    
4006
void helper_fxtract(void)
4007
{
4008
    CPU86_LDoubleU temp;
4009
    unsigned int expdif;
4010

    
4011
    temp.d = ST0;
4012
    expdif = EXPD(temp) - EXPBIAS;
4013
    /*DP exponent bias*/
4014
    ST0 = expdif;
4015
    fpush();
4016
    BIASEXPONENT(temp);
4017
    ST0 = temp.d;
4018
}
4019

    
4020
void helper_fprem1(void)
4021
{
4022
    CPU86_LDouble dblq, fpsrcop, fptemp;
4023
    CPU86_LDoubleU fpsrcop1, fptemp1;
4024
    int expdif;
4025
    signed long long int q;
4026

    
4027
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4028
        ST0 = 0.0 / 0.0; /* NaN */
4029
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4030
        return;
4031
    }
4032

    
4033
    fpsrcop = ST0;
4034
    fptemp = ST1;
4035
    fpsrcop1.d = fpsrcop;
4036
    fptemp1.d = fptemp;
4037
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4038

    
4039
    if (expdif < 0) {
4040
        /* optimisation? taken from the AMD docs */
4041
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4042
        /* ST0 is unchanged */
4043
        return;
4044
    }
4045

    
4046
    if (expdif < 53) {
4047
        dblq = fpsrcop / fptemp;
4048
        /* round dblq towards nearest integer */
4049
        dblq = rint(dblq);
4050
        ST0 = fpsrcop - fptemp * dblq;
4051

    
4052
        /* convert dblq to q by truncating towards zero */
4053
        if (dblq < 0.0)
4054
           q = (signed long long int)(-dblq);
4055
        else
4056
           q = (signed long long int)dblq;
4057

    
4058
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4059
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
4060
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
4061
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4062
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
4063
    } else {
4064
        env->fpus |= 0x400;  /* C2 <-- 1 */
4065
        fptemp = pow(2.0, expdif - 50);
4066
        fpsrcop = (ST0 / ST1) / fptemp;
4067
        /* fpsrcop = integer obtained by chopping */
4068
        fpsrcop = (fpsrcop < 0.0) ?
4069
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4070
        ST0 -= (ST1 * fpsrcop * fptemp);
4071
    }
4072
}
4073

    
4074
void helper_fprem(void)
4075
{
4076
    CPU86_LDouble dblq, fpsrcop, fptemp;
4077
    CPU86_LDoubleU fpsrcop1, fptemp1;
4078
    int expdif;
4079
    signed long long int q;
4080

    
4081
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4082
       ST0 = 0.0 / 0.0; /* NaN */
4083
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4084
       return;
4085
    }
4086

    
4087
    fpsrcop = (CPU86_LDouble)ST0;
4088
    fptemp = (CPU86_LDouble)ST1;
4089
    fpsrcop1.d = fpsrcop;
4090
    fptemp1.d = fptemp;
4091
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4092

    
4093
    if (expdif < 0) {
4094
        /* optimisation? taken from the AMD docs */
4095
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4096
        /* ST0 is unchanged */
4097
        return;
4098
    }
4099

    
4100
    if ( expdif < 53 ) {
4101
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4102
        /* round dblq towards zero */
4103
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4104
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4105

    
4106
        /* convert dblq to q by truncating towards zero */
4107
        if (dblq < 0.0)
4108
           q = (signed long long int)(-dblq);
4109
        else
4110
           q = (signed long long int)dblq;
4111

    
4112
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4113
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
4114
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
4115
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4116
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
4117
    } else {
4118
        int N = 32 + (expdif % 32); /* as per AMD docs */
4119
        env->fpus |= 0x400;  /* C2 <-- 1 */
4120
        fptemp = pow(2.0, (double)(expdif - N));
4121
        fpsrcop = (ST0 / ST1) / fptemp;
4122
        /* fpsrcop = integer obtained by chopping */
4123
        fpsrcop = (fpsrcop < 0.0) ?
4124
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4125
        ST0 -= (ST1 * fpsrcop * fptemp);
4126
    }
4127
}
4128

    
4129
void helper_fyl2xp1(void)
4130
{
4131
    CPU86_LDouble fptemp;
4132

    
4133
    fptemp = ST0;
4134
    if ((fptemp+1.0)>0.0) {
4135
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4136
        ST1 *= fptemp;
4137
        fpop();
4138
    } else {
4139
        env->fpus &= (~0x4700);
4140
        env->fpus |= 0x400;
4141
    }
4142
}
4143

    
4144
void helper_fsqrt(void)
4145
{
4146
    CPU86_LDouble fptemp;
4147

    
4148
    fptemp = ST0;
4149
    if (fptemp<0.0) {
4150
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
4151
        env->fpus |= 0x400;
4152
    }
4153
    ST0 = sqrt(fptemp);
4154
}
4155

    
4156
void helper_fsincos(void)
4157
{
4158
    CPU86_LDouble fptemp;
4159

    
4160
    fptemp = ST0;
4161
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4162
        env->fpus |= 0x400;
4163
    } else {
4164
        ST0 = sin(fptemp);
4165
        fpush();
4166
        ST0 = cos(fptemp);
4167
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4168
        /* the above code is for  |arg| < 2**63 only */
4169
    }
4170
}
4171

    
4172
void helper_frndint(void)
4173
{
4174
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
4175
}
4176

    
4177
void helper_fscale(void)
4178
{
4179
    ST0 = ldexp (ST0, (int)(ST1));
4180
}
4181

    
4182
void helper_fsin(void)
4183
{
4184
    CPU86_LDouble fptemp;
4185

    
4186
    fptemp = ST0;
4187
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4188
        env->fpus |= 0x400;
4189
    } else {
4190
        ST0 = sin(fptemp);
4191
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4192
        /* the above code is for  |arg| < 2**53 only */
4193
    }
4194
}
4195

    
4196
void helper_fcos(void)
4197
{
4198
    CPU86_LDouble fptemp;
4199

    
4200
    fptemp = ST0;
4201
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4202
        env->fpus |= 0x400;
4203
    } else {
4204
        ST0 = cos(fptemp);
4205
        env->fpus &= (~0x400);  /* C2 <-- 0 */
4206
        /* the above code is for  |arg5 < 2**63 only */
4207
    }
4208
}
4209

    
4210
void helper_fxam_ST0(void)
4211
{
4212
    CPU86_LDoubleU temp;
4213
    int expdif;
4214

    
4215
    temp.d = ST0;
4216

    
4217
    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
4218
    if (SIGND(temp))
4219
        env->fpus |= 0x200; /* C1 <-- 1 */
4220

    
4221
    /* XXX: test fptags too */
4222
    expdif = EXPD(temp);
4223
    if (expdif == MAXEXPD) {
4224
#ifdef USE_X86LDOUBLE
4225
        if (MANTD(temp) == 0x8000000000000000ULL)
4226
#else
4227
        if (MANTD(temp) == 0)
4228
#endif
4229
            env->fpus |=  0x500 /*Infinity*/;
4230
        else
4231
            env->fpus |=  0x100 /*NaN*/;
4232
    } else if (expdif == 0) {
4233
        if (MANTD(temp) == 0)
4234
            env->fpus |=  0x4000 /*Zero*/;
4235
        else
4236
            env->fpus |= 0x4400 /*Denormal*/;
4237
    } else {
4238
        env->fpus |= 0x400;
4239
    }
4240
}
4241

    
4242
void helper_fstenv(target_ulong ptr, int data32)
4243
{
4244
    int fpus, fptag, exp, i;
4245
    uint64_t mant;
4246
    CPU86_LDoubleU tmp;
4247

    
4248
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4249
    fptag = 0;
4250
    for (i=7; i>=0; i--) {
4251
        fptag <<= 2;
4252
        if (env->fptags[i]) {
4253
            fptag |= 3;
4254
        } else {
4255
            tmp.d = env->fpregs[i].d;
4256
            exp = EXPD(tmp);
4257
            mant = MANTD(tmp);
4258
            if (exp == 0 && mant == 0) {
4259
                /* zero */
4260
                fptag |= 1;
4261
            } else if (exp == 0 || exp == MAXEXPD
4262
#ifdef USE_X86LDOUBLE
4263
                       || (mant & (1LL << 63)) == 0
4264
#endif
4265
                       ) {
4266
                /* NaNs, infinity, denormal */
4267
                fptag |= 2;
4268
            }
4269
        }
4270
    }
4271
    if (data32) {
4272
        /* 32 bit */
4273
        stl(ptr, env->fpuc);
4274
        stl(ptr + 4, fpus);
4275
        stl(ptr + 8, fptag);
4276
        stl(ptr + 12, 0); /* fpip */
4277
        stl(ptr + 16, 0); /* fpcs */
4278
        stl(ptr + 20, 0); /* fpoo */
4279
        stl(ptr + 24, 0); /* fpos */
4280
    } else {
4281
        /* 16 bit */
4282
        stw(ptr, env->fpuc);
4283
        stw(ptr + 2, fpus);
4284
        stw(ptr + 4, fptag);
4285
        stw(ptr + 6, 0);
4286
        stw(ptr + 8, 0);
4287
        stw(ptr + 10, 0);
4288
        stw(ptr + 12, 0);
4289
    }
4290
}
4291

    
4292
void helper_fldenv(target_ulong ptr, int data32)
4293
{
4294
    int i, fpus, fptag;
4295

    
4296
    if (data32) {
4297
        env->fpuc = lduw(ptr);
4298
        fpus = lduw(ptr + 4);
4299
        fptag = lduw(ptr + 8);
4300
    }
4301
    else {
4302
        env->fpuc = lduw(ptr);
4303
        fpus = lduw(ptr + 2);
4304
        fptag = lduw(ptr + 4);
4305
    }
4306
    env->fpstt = (fpus >> 11) & 7;
4307
    env->fpus = fpus & ~0x3800;
4308
    for(i = 0;i < 8; i++) {
4309
        env->fptags[i] = ((fptag & 3) == 3);
4310
        fptag >>= 2;
4311
    }
4312
}
4313

    
4314
void helper_fsave(target_ulong ptr, int data32)
4315
{
4316
    CPU86_LDouble tmp;
4317
    int i;
4318

    
4319
    helper_fstenv(ptr, data32);
4320

    
4321
    ptr += (14 << data32);
4322
    for(i = 0;i < 8; i++) {
4323
        tmp = ST(i);
4324
        helper_fstt(tmp, ptr);
4325
        ptr += 10;
4326
    }
4327

    
4328
    /* fninit */
4329
    env->fpus = 0;
4330
    env->fpstt = 0;
4331
    env->fpuc = 0x37f;
4332
    env->fptags[0] = 1;
4333
    env->fptags[1] = 1;
4334
    env->fptags[2] = 1;
4335
    env->fptags[3] = 1;
4336
    env->fptags[4] = 1;
4337
    env->fptags[5] = 1;
4338
    env->fptags[6] = 1;
4339
    env->fptags[7] = 1;
4340
}
4341

    
4342
void helper_frstor(target_ulong ptr, int data32)
4343
{
4344
    CPU86_LDouble tmp;
4345
    int i;
4346

    
4347
    helper_fldenv(ptr, data32);
4348
    ptr += (14 << data32);
4349

    
4350
    for(i = 0;i < 8; i++) {
4351
        tmp = helper_fldt(ptr);
4352
        ST(i) = tmp;
4353
        ptr += 10;
4354
    }
4355
}
4356

    
4357
void helper_fxsave(target_ulong ptr, int data64)
4358
{
4359
    int fpus, fptag, i, nb_xmm_regs;
4360
    CPU86_LDouble tmp;
4361
    target_ulong addr;
4362

    
4363
    /* The operand must be 16 byte aligned */
4364
    if (ptr & 0xf) {
4365
        raise_exception(EXCP0D_GPF);
4366
    }
4367

    
4368
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4369
    fptag = 0;
4370
    for(i = 0; i < 8; i++) {
4371
        fptag |= (env->fptags[i] << i);
4372
    }
4373
    stw(ptr, env->fpuc);
4374
    stw(ptr + 2, fpus);
4375
    stw(ptr + 4, fptag ^ 0xff);
4376
#ifdef TARGET_X86_64
4377
    if (data64) {
4378
        stq(ptr + 0x08, 0); /* rip */
4379
        stq(ptr + 0x10, 0); /* rdp */
4380
    } else 
4381
#endif
4382
    {
4383
        stl(ptr + 0x08, 0); /* eip */
4384
        stl(ptr + 0x0c, 0); /* sel  */
4385
        stl(ptr + 0x10, 0); /* dp */
4386
        stl(ptr + 0x14, 0); /* sel  */
4387
    }
4388

    
4389
    addr = ptr + 0x20;
4390
    for(i = 0;i < 8; i++) {
4391
        tmp = ST(i);
4392
        helper_fstt(tmp, addr);
4393
        addr += 16;
4394
    }
4395

    
4396
    if (env->cr[4] & CR4_OSFXSR_MASK) {
4397
        /* XXX: finish it */
4398
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4399
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4400
        if (env->hflags & HF_CS64_MASK)
4401
            nb_xmm_regs = 16;
4402
        else
4403
            nb_xmm_regs = 8;
4404
        addr = ptr + 0xa0;
4405
        /* Fast FXSAVE leaves out the XMM registers */
4406
        if (!(env->efer & MSR_EFER_FFXSR)
4407
          || (env->hflags & HF_CPL_MASK)
4408
          || !(env->hflags & HF_LMA_MASK)) {
4409
            for(i = 0; i < nb_xmm_regs; i++) {
4410
                stq(addr, env->xmm_regs[i].XMM_Q(0));
4411
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4412
                addr += 16;
4413
            }
4414
        }
4415
    }
4416
}
4417

    
4418
void helper_fxrstor(target_ulong ptr, int data64)
4419
{
4420
    int i, fpus, fptag, nb_xmm_regs;
4421
    CPU86_LDouble tmp;
4422
    target_ulong addr;
4423

    
4424
    /* The operand must be 16 byte aligned */
4425
    if (ptr & 0xf) {
4426
        raise_exception(EXCP0D_GPF);
4427
    }
4428

    
4429
    env->fpuc = lduw(ptr);
4430
    fpus = lduw(ptr + 2);
4431
    fptag = lduw(ptr + 4);
4432
    env->fpstt = (fpus >> 11) & 7;
4433
    env->fpus = fpus & ~0x3800;
4434
    fptag ^= 0xff;
4435
    for(i = 0;i < 8; i++) {
4436
        env->fptags[i] = ((fptag >> i) & 1);
4437
    }
4438

    
4439
    addr = ptr + 0x20;
4440
    for(i = 0;i < 8; i++) {
4441
        tmp = helper_fldt(addr);
4442
        ST(i) = tmp;
4443
        addr += 16;
4444
    }
4445

    
4446
    if (env->cr[4] & CR4_OSFXSR_MASK) {
4447
        /* XXX: finish it */
4448
        env->mxcsr = ldl(ptr + 0x18);
4449
        //ldl(ptr + 0x1c);
4450
        if (env->hflags & HF_CS64_MASK)
4451
            nb_xmm_regs = 16;
4452
        else
4453
            nb_xmm_regs = 8;
4454
        addr = ptr + 0xa0;
4455
        /* Fast FXRESTORE leaves out the XMM registers */
4456
        if (!(env->efer & MSR_EFER_FFXSR)
4457
          || (env->hflags & HF_CPL_MASK)
4458
          || !(env->hflags & HF_LMA_MASK)) {
4459
            for(i = 0; i < nb_xmm_regs; i++) {
4460
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4461
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4462
                addr += 16;
4463
            }
4464
        }
4465
    }
4466
}
4467

    
4468
#ifndef USE_X86LDOUBLE
4469

    
4470
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4471
{
4472
    CPU86_LDoubleU temp;
4473
    int e;
4474

    
4475
    temp.d = f;
4476
    /* mantissa */
4477
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
4478
    /* exponent + sign */
4479
    e = EXPD(temp) - EXPBIAS + 16383;
4480
    e |= SIGND(temp) >> 16;
4481
    *pexp = e;
4482
}
4483

    
4484
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4485
{
4486
    CPU86_LDoubleU temp;
4487
    int e;
4488
    uint64_t ll;
4489

    
4490
    /* XXX: handle overflow ? */
4491
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4492
    e |= (upper >> 4) & 0x800; /* sign */
4493
    ll = (mant >> 11) & ((1LL << 52) - 1);
4494
#ifdef __arm__
4495
    temp.l.upper = (e << 20) | (ll >> 32);
4496
    temp.l.lower = ll;
4497
#else
4498
    temp.ll = ll | ((uint64_t)e << 52);
4499
#endif
4500
    return temp.d;
4501
}
4502

    
4503
#else
4504

    
4505
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4506
{
4507
    CPU86_LDoubleU temp;
4508

    
4509
    temp.d = f;
4510
    *pmant = temp.l.lower;
4511
    *pexp = temp.l.upper;
4512
}
4513

    
4514
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4515
{
4516
    CPU86_LDoubleU temp;
4517

    
4518
    temp.l.upper = upper;
4519
    temp.l.lower = mant;
4520
    return temp.d;
4521
}
4522
#endif
4523

    
4524
#ifdef TARGET_X86_64
4525

    
4526
//#define DEBUG_MULDIV
4527

    
4528
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4529
{
4530
    *plow += a;
4531
    /* carry test */
4532
    if (*plow < a)
4533
        (*phigh)++;
4534
    *phigh += b;
4535
}
4536

    
4537
static void neg128(uint64_t *plow, uint64_t *phigh)
4538
{
4539
    *plow = ~ *plow;
4540
    *phigh = ~ *phigh;
4541
    add128(plow, phigh, 1, 0);
4542
}
4543

    
4544
/* return TRUE if overflow */
4545
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4546
{
4547
    uint64_t q, r, a1, a0;
4548
    int i, qb, ab;
4549

    
4550
    a0 = *plow;
4551
    a1 = *phigh;
4552
    if (a1 == 0) {
4553
        q = a0 / b;
4554
        r = a0 % b;
4555
        *plow = q;
4556
        *phigh = r;
4557
    } else {
4558
        if (a1 >= b)
4559
            return 1;
4560
        /* XXX: use a better algorithm */
4561
        for(i = 0; i < 64; i++) {
4562
            ab = a1 >> 63;
4563
            a1 = (a1 << 1) | (a0 >> 63);
4564
            if (ab || a1 >= b) {
4565
                a1 -= b;
4566
                qb = 1;
4567
            } else {
4568
                qb = 0;
4569
            }
4570
            a0 = (a0 << 1) | qb;
4571
        }
4572
#if defined(DEBUG_MULDIV)
4573
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4574
               *phigh, *plow, b, a0, a1);
4575
#endif
4576
        *plow = a0;
4577
        *phigh = a1;
4578
    }
4579
    return 0;
4580
}
4581

    
4582
/* return TRUE if overflow */
4583
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4584
{
4585
    int sa, sb;
4586
    sa = ((int64_t)*phigh < 0);
4587
    if (sa)
4588
        neg128(plow, phigh);
4589
    sb = (b < 0);
4590
    if (sb)
4591
        b = -b;
4592
    if (div64(plow, phigh, b) != 0)
4593
        return 1;
4594
    if (sa ^ sb) {
4595
        if (*plow > (1ULL << 63))
4596
            return 1;
4597
        *plow = - *plow;
4598
    } else {
4599
        if (*plow >= (1ULL << 63))
4600
            return 1;
4601
    }
4602
    if (sa)
4603
        *phigh = - *phigh;
4604
    return 0;
4605
}
4606

    
4607
void helper_mulq_EAX_T0(target_ulong t0)
4608
{
4609
    uint64_t r0, r1;
4610

    
4611
    mulu64(&r0, &r1, EAX, t0);
4612
    EAX = r0;
4613
    EDX = r1;
4614
    CC_DST = r0;
4615
    CC_SRC = r1;
4616
}
4617

    
4618
void helper_imulq_EAX_T0(target_ulong t0)
4619
{
4620
    uint64_t r0, r1;
4621

    
4622
    muls64(&r0, &r1, EAX, t0);
4623
    EAX = r0;
4624
    EDX = r1;
4625
    CC_DST = r0;
4626
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4627
}
4628

    
4629
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4630
{
4631
    uint64_t r0, r1;
4632

    
4633
    muls64(&r0, &r1, t0, t1);
4634
    CC_DST = r0;
4635
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4636
    return r0;
4637
}
4638

    
4639
void helper_divq_EAX(target_ulong t0)
4640
{
4641
    uint64_t r0, r1;
4642
    if (t0 == 0) {
4643
        raise_exception(EXCP00_DIVZ);
4644
    }
4645
    r0 = EAX;
4646
    r1 = EDX;
4647
    if (div64(&r0, &r1, t0))
4648
        raise_exception(EXCP00_DIVZ);
4649
    EAX = r0;
4650
    EDX = r1;
4651
}
4652

    
4653
void helper_idivq_EAX(target_ulong t0)
4654
{
4655
    uint64_t r0, r1;
4656
    if (t0 == 0) {
4657
        raise_exception(EXCP00_DIVZ);
4658
    }
4659
    r0 = EAX;
4660
    r1 = EDX;
4661
    if (idiv64(&r0, &r1, t0))
4662
        raise_exception(EXCP00_DIVZ);
4663
    EAX = r0;
4664
    EDX = r1;
4665
}
4666
#endif
4667

    
4668
static void do_hlt(void)
4669
{
4670
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4671
    env->halted = 1;
4672
    env->exception_index = EXCP_HLT;
4673
    cpu_loop_exit();
4674
}
4675

    
4676
void helper_hlt(int next_eip_addend)
4677
{
4678
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4679
    EIP += next_eip_addend;
4680
    
4681
    do_hlt();
4682
}
4683

    
4684
void helper_monitor(target_ulong ptr)
4685
{
4686
    if ((uint32_t)ECX != 0)
4687
        raise_exception(EXCP0D_GPF);
4688
    /* XXX: store address ? */
4689
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4690
}
4691

    
4692
void helper_mwait(int next_eip_addend)
4693
{
4694
    if ((uint32_t)ECX != 0)
4695
        raise_exception(EXCP0D_GPF);
4696
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4697
    EIP += next_eip_addend;
4698

    
4699
    /* XXX: not complete but not completely erroneous */
4700
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
4701
        /* more than one CPU: do not sleep because another CPU may
4702
           wake this one */
4703
    } else {
4704
        do_hlt();
4705
    }
4706
}
4707

    
4708
void helper_debug(void)
4709
{
4710
    env->exception_index = EXCP_DEBUG;
4711
    cpu_loop_exit();
4712
}
4713

    
4714
void helper_reset_rf(void)
4715
{
4716
    env->eflags &= ~RF_MASK;
4717
}
4718

    
4719
void helper_raise_interrupt(int intno, int next_eip_addend)
4720
{
4721
    raise_interrupt(intno, 1, 0, next_eip_addend);
4722
}
4723

    
4724
void helper_raise_exception(int exception_index)
4725
{
4726
    raise_exception(exception_index);
4727
}
4728

    
4729
void helper_cli(void)
4730
{
4731
    env->eflags &= ~IF_MASK;
4732
}
4733

    
4734
void helper_sti(void)
4735
{
4736
    env->eflags |= IF_MASK;
4737
}
4738

    
4739
#if 0
4740
/* vm86plus instructions */
4741
void helper_cli_vm(void)
4742
{
4743
    env->eflags &= ~VIF_MASK;
4744
}
4745

4746
void helper_sti_vm(void)
4747
{
4748
    env->eflags |= VIF_MASK;
4749
    if (env->eflags & VIP_MASK) {
4750
        raise_exception(EXCP0D_GPF);
4751
    }
4752
}
4753
#endif
4754

    
4755
void helper_set_inhibit_irq(void)
4756
{
4757
    env->hflags |= HF_INHIBIT_IRQ_MASK;
4758
}
4759

    
4760
void helper_reset_inhibit_irq(void)
4761
{
4762
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4763
}
4764

    
4765
void helper_boundw(target_ulong a0, int v)
4766
{
4767
    int low, high;
4768
    low = ldsw(a0);
4769
    high = ldsw(a0 + 2);
4770
    v = (int16_t)v;
4771
    if (v < low || v > high) {
4772
        raise_exception(EXCP05_BOUND);
4773
    }
4774
}
4775

    
4776
void helper_boundl(target_ulong a0, int v)
4777
{
4778
    int low, high;
4779
    low = ldl(a0);
4780
    high = ldl(a0 + 4);
4781
    if (v < low || v > high) {
4782
        raise_exception(EXCP05_BOUND);
4783
    }
4784
}
4785

    
4786
static float approx_rsqrt(float a)
4787
{
4788
    return 1.0 / sqrt(a);
4789
}
4790

    
4791
static float approx_rcp(float a)
4792
{
4793
    return 1.0 / a;
4794
}
4795

    
4796
#if !defined(CONFIG_USER_ONLY)
4797

    
4798
#define MMUSUFFIX _mmu
4799

    
4800
#define SHIFT 0
4801
#include "softmmu_template.h"
4802

    
4803
#define SHIFT 1
4804
#include "softmmu_template.h"
4805

    
4806
#define SHIFT 2
4807
#include "softmmu_template.h"
4808

    
4809
#define SHIFT 3
4810
#include "softmmu_template.h"
4811

    
4812
#endif
4813

    
4814
#if !defined(CONFIG_USER_ONLY)
4815
/* try to fill the TLB and return an exception if error. If retaddr is
4816
   NULL, it means that the function was called in C code (i.e. not
4817
   from generated code or from helper.c) */
4818
/* XXX: fix it to restore all registers */
4819
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4820
{
4821
    TranslationBlock *tb;
4822
    int ret;
4823
    unsigned long pc;
4824
    CPUX86State *saved_env;
4825

    
4826
    /* XXX: hack to restore env in all cases, even if not called from
4827
       generated code */
4828
    saved_env = env;
4829
    env = cpu_single_env;
4830

    
4831
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4832
    if (ret) {
4833
        if (retaddr) {
4834
            /* now we have a real cpu fault */
4835
            pc = (unsigned long)retaddr;
4836
            tb = tb_find_pc(pc);
4837
            if (tb) {
4838
                /* the PC is inside the translated code. It means that we have
4839
                   a virtual CPU fault */
4840
                cpu_restore_state(tb, env, pc, NULL);
4841
            }
4842
        }
4843
        raise_exception_err(env->exception_index, env->error_code);
4844
    }
4845
    env = saved_env;
4846
}
4847
#endif
4848

    
4849
/* Secure Virtual Machine helpers */
4850

    
4851
#if defined(CONFIG_USER_ONLY)
4852

    
4853
void helper_vmrun(int aflag, int next_eip_addend)
4854
{ 
4855
}
4856
void helper_vmmcall(void) 
4857
{ 
4858
}
4859
void helper_vmload(int aflag)
4860
{ 
4861
}
4862
void helper_vmsave(int aflag)
4863
{ 
4864
}
4865
void helper_stgi(void)
4866
{
4867
}
4868
void helper_clgi(void)
4869
{
4870
}
4871
void helper_skinit(void) 
4872
{ 
4873
}
4874
void helper_invlpga(int aflag)
4875
{ 
4876
}
4877
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) 
4878
{ 
4879
}
4880
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4881
{
4882
}
4883

    
4884
void helper_svm_check_io(uint32_t port, uint32_t param, 
4885
                         uint32_t next_eip_addend)
4886
{
4887
}
4888
#else
4889

    
4890
static inline void svm_save_seg(target_phys_addr_t addr,
4891
                                const SegmentCache *sc)
4892
{
4893
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
4894
             sc->selector);
4895
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
4896
             sc->base);
4897
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
4898
             sc->limit);
4899
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
4900
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4901
}
4902
                                
4903
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4904
{
4905
    unsigned int flags;
4906

    
4907
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4908
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4909
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4910
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4911
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4912
}
4913

    
4914
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
4915
                                      CPUState *env, int seg_reg)
4916
{
4917
    SegmentCache sc1, *sc = &sc1;
4918
    svm_load_seg(addr, sc);
4919
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4920
                           sc->base, sc->limit, sc->flags);
4921
}
4922

    
4923
void helper_vmrun(int aflag, int next_eip_addend)
4924
{
4925
    target_ulong addr;
4926
    uint32_t event_inj;
4927
    uint32_t int_ctl;
4928

    
4929
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4930

    
4931
    if (aflag == 2)
4932
        addr = EAX;
4933
    else
4934
        addr = (uint32_t)EAX;
4935

    
4936
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4937

    
4938
    env->vm_vmcb = addr;
4939

    
4940
    /* save the current CPU state in the hsave page */
4941
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4942
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4943

    
4944
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4945
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4946

    
4947
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4948
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4949
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4950
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4951
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4952
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4953

    
4954
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4955
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4956

    
4957
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4958
                  &env->segs[R_ES]);
4959
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4960
                 &env->segs[R_CS]);
4961
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4962
                 &env->segs[R_SS]);
4963
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4964
                 &env->segs[R_DS]);
4965

    
4966
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4967
             EIP + next_eip_addend);
4968
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4969
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4970

    
4971
    /* load the interception bitmaps so we do not need to access the
4972
       vmcb in svm mode */
4973
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4974
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4975
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4976
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4977
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4978
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4979

    
4980
    /* enable intercepts */
4981
    env->hflags |= HF_SVMI_MASK;
4982

    
4983
    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4984

    
4985
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4986
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4987

    
4988
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4989
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4990

    
4991
    /* clear exit_info_2 so we behave like the real hardware */
4992
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4993

    
4994
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4995
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4996
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4997
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4998
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4999
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5000
    if (int_ctl & V_INTR_MASKING_MASK) {
5001
        env->v_tpr = int_ctl & V_TPR_MASK;
5002
        env->hflags2 |= HF2_VINTR_MASK;
5003
        if (env->eflags & IF_MASK)
5004
            env->hflags2 |= HF2_HIF_MASK;
5005
    }
5006

    
5007
    cpu_load_efer(env, 
5008
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5009
    env->eflags = 0;
5010
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5011
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5012
    CC_OP = CC_OP_EFLAGS;
5013

    
5014
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5015
                       env, R_ES);
5016
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5017
                       env, R_CS);
5018
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5019
                       env, R_SS);
5020
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5021
                       env, R_DS);
5022

    
5023
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5024
    env->eip = EIP;
5025
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5026
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5027
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5028
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5029
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5030

    
5031
    /* FIXME: guest state consistency checks */
5032

    
5033
    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5034
        case TLB_CONTROL_DO_NOTHING:
5035
            break;
5036
        case TLB_CONTROL_FLUSH_ALL_ASID:
5037
            /* FIXME: this is not 100% correct but should work for now */
5038
            tlb_flush(env, 1);
5039
        break;
5040
    }
5041

    
5042
    env->hflags2 |= HF2_GIF_MASK;
5043

    
5044
    if (int_ctl & V_IRQ_MASK) {
5045
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5046
    }
5047

    
5048
    /* maybe we need to inject an event */
5049
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5050
    if (event_inj & SVM_EVTINJ_VALID) {
5051
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5052
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5053
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5054

    
5055
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5056
        /* FIXME: need to implement valid_err */
5057
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5058
        case SVM_EVTINJ_TYPE_INTR:
5059
                env->exception_index = vector;
5060
                env->error_code = event_inj_err;
5061
                env->exception_is_int = 0;
5062
                env->exception_next_eip = -1;
5063
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5064
                /* XXX: is it always correct ? */
5065
                do_interrupt(vector, 0, 0, 0, 1);
5066
                break;
5067
        case SVM_EVTINJ_TYPE_NMI:
5068
                env->exception_index = EXCP02_NMI;
5069
                env->error_code = event_inj_err;
5070
                env->exception_is_int = 0;
5071
                env->exception_next_eip = EIP;
5072
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5073
                cpu_loop_exit();
5074
                break;
5075
        case SVM_EVTINJ_TYPE_EXEPT:
5076
                env->exception_index = vector;
5077
                env->error_code = event_inj_err;
5078
                env->exception_is_int = 0;
5079
                env->exception_next_eip = -1;
5080
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5081
                cpu_loop_exit();
5082
                break;
5083
        case SVM_EVTINJ_TYPE_SOFT:
5084
                env->exception_index = vector;
5085
                env->error_code = event_inj_err;
5086
                env->exception_is_int = 1;
5087
                env->exception_next_eip = EIP;
5088
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5089
                cpu_loop_exit();
5090
                break;
5091
        }
5092
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5093
    }
5094
}
5095

    
5096
void helper_vmmcall(void)
5097
{
5098
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5099
    raise_exception(EXCP06_ILLOP);
5100
}
5101

    
5102
void helper_vmload(int aflag)
5103
{
5104
    target_ulong addr;
5105
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5106

    
5107
    if (aflag == 2)
5108
        addr = EAX;
5109
    else
5110
        addr = (uint32_t)EAX;
5111

    
5112
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5113
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5114
                env->segs[R_FS].base);
5115

    
5116
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5117
                       env, R_FS);
5118
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5119
                       env, R_GS);
5120
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5121
                 &env->tr);
5122
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5123
                 &env->ldt);
5124

    
5125
#ifdef TARGET_X86_64
5126
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5127
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5128
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5129
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5130
#endif
5131
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5132
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5133
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5134
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5135
}
5136

    
5137
void helper_vmsave(int aflag)
5138
{
5139
    target_ulong addr;
5140
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5141

    
5142
    if (aflag == 2)
5143
        addr = EAX;
5144
    else
5145
        addr = (uint32_t)EAX;
5146

    
5147
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5148
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5149
                env->segs[R_FS].base);
5150

    
5151
    svm_save_seg(addr + offsetof(struct vmcb, save.fs), 
5152
                 &env->segs[R_FS]);
5153
    svm_save_seg(addr + offsetof(struct vmcb, save.gs), 
5154
                 &env->segs[R_GS]);
5155
    svm_save_seg(addr + offsetof(struct vmcb, save.tr), 
5156
                 &env->tr);
5157
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr), 
5158
                 &env->ldt);
5159

    
5160
#ifdef TARGET_X86_64
5161
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5162
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5163
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5164
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5165
#endif
5166
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5167
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5168
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5169
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5170
}
5171

    
5172
void helper_stgi(void)
5173
{
5174
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5175
    env->hflags2 |= HF2_GIF_MASK;
5176
}
5177

    
5178
void helper_clgi(void)
5179
{
5180
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5181
    env->hflags2 &= ~HF2_GIF_MASK;
5182
}
5183

    
5184
void helper_skinit(void)
5185
{
5186
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5187
    /* XXX: not implemented */
5188
    raise_exception(EXCP06_ILLOP);
5189
}
5190

    
5191
void helper_invlpga(int aflag)
5192
{
5193
    target_ulong addr;
5194
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5195
    
5196
    if (aflag == 2)
5197
        addr = EAX;
5198
    else
5199
        addr = (uint32_t)EAX;
5200

    
5201
    /* XXX: could use the ASID to see if it is needed to do the
5202
       flush */
5203
    tlb_flush_page(env, addr);
5204
}
5205

    
5206
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5207
{
5208
    if (likely(!(env->hflags & HF_SVMI_MASK)))
5209
        return;
5210
    switch(type) {
5211
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5212
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5213
            helper_vmexit(type, param);
5214
        }
5215
        break;
5216
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5217
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5218
            helper_vmexit(type, param);
5219
        }
5220
        break;
5221
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5222
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5223
            helper_vmexit(type, param);
5224
        }
5225
        break;
5226
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5227
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5228
            helper_vmexit(type, param);
5229
        }
5230
        break;
5231
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5232
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5233
            helper_vmexit(type, param);
5234
        }
5235
        break;
5236
    case SVM_EXIT_MSR:
5237
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5238
            /* FIXME: this should be read in at vmrun (faster this way?) */
5239
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5240
            uint32_t t0, t1;
5241
            switch((uint32_t)ECX) {
5242
            case 0 ... 0x1fff:
5243
                t0 = (ECX * 2) % 8;
5244
                t1 = (ECX * 2) / 8;
5245
                break;
5246
            case 0xc0000000 ... 0xc0001fff:
5247
                t0 = (8192 + ECX - 0xc0000000) * 2;
5248
                t1 = (t0 / 8);
5249
                t0 %= 8;
5250
                break;
5251
            case 0xc0010000 ... 0xc0011fff:
5252
                t0 = (16384 + ECX - 0xc0010000) * 2;
5253
                t1 = (t0 / 8);
5254
                t0 %= 8;
5255
                break;
5256
            default:
5257
                helper_vmexit(type, param);
5258
                t0 = 0;
5259
                t1 = 0;
5260
                break;
5261
            }
5262
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
5263
                helper_vmexit(type, param);
5264
        }
5265
        break;
5266
    default:
5267
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5268
            helper_vmexit(type, param);
5269
        }
5270
        break;
5271
    }
5272
}
5273

    
5274
void helper_svm_check_io(uint32_t port, uint32_t param, 
5275
                         uint32_t next_eip_addend)
5276
{
5277
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5278
        /* FIXME: this should be read in at vmrun (faster this way?) */
5279
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5280
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5281
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5282
            /* next EIP */
5283
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
5284
                     env->eip + next_eip_addend);
5285
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5286
        }
5287
    }
5288
}
5289

    
5290
/* Note: currently only 32 bits of exit_code are used */
5291
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5292
{
5293
    uint32_t int_ctl;
5294

    
5295
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5296
                exit_code, exit_info_1,
5297
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5298
                EIP);
5299

    
5300
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5301
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5302
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5303
    } else {
5304
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5305
    }
5306

    
5307
    /* Save the VM state in the vmcb */
5308
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), 
5309
                 &env->segs[R_ES]);
5310
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), 
5311
                 &env->segs[R_CS]);
5312
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), 
5313
                 &env->segs[R_SS]);
5314
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), 
5315
                 &env->segs[R_DS]);
5316

    
5317
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5318
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5319

    
5320
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5321
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5322

    
5323
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5324
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5325
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5326
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5327
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5328

    
5329
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5330
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5331
    int_ctl |= env->v_tpr & V_TPR_MASK;
5332
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5333
        int_ctl |= V_IRQ_MASK;
5334
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5335

    
5336
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5337
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5338
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5339
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5340
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5341
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5342
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5343

    
5344
    /* Reload the host state from vm_hsave */
5345
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5346
    env->hflags &= ~HF_SVMI_MASK;
5347
    env->intercept = 0;
5348
    env->intercept_exceptions = 0;
5349
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5350
    env->tsc_offset = 0;
5351

    
5352
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5353
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5354

    
5355
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5356
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5357

    
5358
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5359
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5360
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5361
    /* we need to set the efer after the crs so the hidden flags get
5362
       set properly */
5363
    cpu_load_efer(env, 
5364
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5365
    env->eflags = 0;
5366
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5367
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5368
    CC_OP = CC_OP_EFLAGS;
5369

    
5370
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5371
                       env, R_ES);
5372
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5373
                       env, R_CS);
5374
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5375
                       env, R_SS);
5376
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5377
                       env, R_DS);
5378

    
5379
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5380
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5381
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5382

    
5383
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5384
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5385

    
5386
    /* other setups */
5387
    cpu_x86_set_cpl(env, 0);
5388
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5389
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5390

    
5391
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5392
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5393
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5394
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5395
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5396

    
5397
    env->hflags2 &= ~HF2_GIF_MASK;
5398
    /* FIXME: Resets the current ASID register to zero (host ASID). */
5399

    
5400
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5401

    
5402
    /* Clears the TSC_OFFSET inside the processor. */
5403

    
5404
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
5405
       from the page table indicated the host's CR3. If the PDPEs contain
5406
       illegal state, the processor causes a shutdown. */
5407

    
5408
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5409
    env->cr[0] |= CR0_PE_MASK;
5410
    env->eflags &= ~VM_MASK;
5411

    
5412
    /* Disables all breakpoints in the host DR7 register. */
5413

    
5414
    /* Checks the reloaded host state for consistency. */
5415

    
5416
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5417
       host's code segment or non-canonical (in the case of long mode), a
5418
       #GP fault is delivered inside the host.) */
5419

    
5420
    /* remove any pending exception */
5421
    env->exception_index = -1;
5422
    env->error_code = 0;
5423
    env->old_exception = -1;
5424

    
5425
    cpu_loop_exit();
5426
}
5427

    
5428
#endif
5429

    
5430
/* MMX/SSE */
5431
/* XXX: optimize by storing fptt and fptags in the static cpu state */
5432
void helper_enter_mmx(void)
5433
{
5434
    env->fpstt = 0;
5435
    *(uint32_t *)(env->fptags) = 0;
5436
    *(uint32_t *)(env->fptags + 4) = 0;
5437
}
5438

    
5439
void helper_emms(void)
5440
{
5441
    /* set to empty state */
5442
    *(uint32_t *)(env->fptags) = 0x01010101;
5443
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
5444
}
5445

    
5446
/* XXX: suppress */
5447
void helper_movq(void *d, void *s)
5448
{
5449
    *(uint64_t *)d = *(uint64_t *)s;
5450
}
5451

    
5452
#define SHIFT 0
5453
#include "ops_sse.h"
5454

    
5455
#define SHIFT 1
5456
#include "ops_sse.h"
5457

    
5458
#define SHIFT 0
5459
#include "helper_template.h"
5460
#undef SHIFT
5461

    
5462
#define SHIFT 1
5463
#include "helper_template.h"
5464
#undef SHIFT
5465

    
5466
#define SHIFT 2
5467
#include "helper_template.h"
5468
#undef SHIFT
5469

    
5470
#ifdef TARGET_X86_64
5471

    
5472
#define SHIFT 3
5473
#include "helper_template.h"
5474
#undef SHIFT
5475

    
5476
#endif
5477

    
5478
/* bit operations */
5479
target_ulong helper_bsf(target_ulong t0)
5480
{
5481
    int count;
5482
    target_ulong res;
5483

    
5484
    res = t0;
5485
    count = 0;
5486
    while ((res & 1) == 0) {
5487
        count++;
5488
        res >>= 1;
5489
    }
5490
    return count;
5491
}
5492

    
5493
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5494
{
5495
    int count;
5496
    target_ulong res, mask;
5497

    
5498
    if (wordsize > 0 && t0 == 0) {
5499
        return wordsize;
5500
    }
5501
    res = t0;
5502
    count = TARGET_LONG_BITS - 1;
5503
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5504
    while ((res & mask) == 0) {
5505
        count--;
5506
        res <<= 1;
5507
    }
5508
    if (wordsize > 0) {
5509
        return wordsize - 1 - count;
5510
    }
5511
    return count;
5512
}
5513

    
5514
target_ulong helper_bsr(target_ulong t0)
5515
{
5516
        return helper_lzcnt(t0, 0);
5517
}
5518

    
5519
static int compute_all_eflags(void)
5520
{
5521
    return CC_SRC;
5522
}
5523

    
5524
static int compute_c_eflags(void)
5525
{
5526
    return CC_SRC & CC_C;
5527
}
5528

    
5529
uint32_t helper_cc_compute_all(int op)
5530
{
5531
    switch (op) {
5532
    default: /* should never happen */ return 0;
5533

    
5534
    case CC_OP_EFLAGS: return compute_all_eflags();
5535

    
5536
    case CC_OP_MULB: return compute_all_mulb();
5537
    case CC_OP_MULW: return compute_all_mulw();
5538
    case CC_OP_MULL: return compute_all_mull();
5539

    
5540
    case CC_OP_ADDB: return compute_all_addb();
5541
    case CC_OP_ADDW: return compute_all_addw();
5542
    case CC_OP_ADDL: return compute_all_addl();
5543

    
5544
    case CC_OP_ADCB: return compute_all_adcb();
5545
    case CC_OP_ADCW: return compute_all_adcw();
5546
    case CC_OP_ADCL: return compute_all_adcl();
5547

    
5548
    case CC_OP_SUBB: return compute_all_subb();
5549
    case CC_OP_SUBW: return compute_all_subw();
5550
    case CC_OP_SUBL: return compute_all_subl();
5551

    
5552
    case CC_OP_SBBB: return compute_all_sbbb();
5553
    case CC_OP_SBBW: return compute_all_sbbw();
5554
    case CC_OP_SBBL: return compute_all_sbbl();
5555

    
5556
    case CC_OP_LOGICB: return compute_all_logicb();
5557
    case CC_OP_LOGICW: return compute_all_logicw();
5558
    case CC_OP_LOGICL: return compute_all_logicl();
5559

    
5560
    case CC_OP_INCB: return compute_all_incb();
5561
    case CC_OP_INCW: return compute_all_incw();
5562
    case CC_OP_INCL: return compute_all_incl();
5563

    
5564
    case CC_OP_DECB: return compute_all_decb();
5565
    case CC_OP_DECW: return compute_all_decw();
5566
    case CC_OP_DECL: return compute_all_decl();
5567

    
5568
    case CC_OP_SHLB: return compute_all_shlb();
5569
    case CC_OP_SHLW: return compute_all_shlw();
5570
    case CC_OP_SHLL: return compute_all_shll();
5571

    
5572
    case CC_OP_SARB: return compute_all_sarb();
5573
    case CC_OP_SARW: return compute_all_sarw();
5574
    case CC_OP_SARL: return compute_all_sarl();
5575

    
5576
#ifdef TARGET_X86_64
5577
    case CC_OP_MULQ: return compute_all_mulq();
5578

    
5579
    case CC_OP_ADDQ: return compute_all_addq();
5580

    
5581
    case CC_OP_ADCQ: return compute_all_adcq();
5582

    
5583
    case CC_OP_SUBQ: return compute_all_subq();
5584

    
5585
    case CC_OP_SBBQ: return compute_all_sbbq();
5586

    
5587
    case CC_OP_LOGICQ: return compute_all_logicq();
5588

    
5589
    case CC_OP_INCQ: return compute_all_incq();
5590

    
5591
    case CC_OP_DECQ: return compute_all_decq();
5592

    
5593
    case CC_OP_SHLQ: return compute_all_shlq();
5594

    
5595
    case CC_OP_SARQ: return compute_all_sarq();
5596
#endif
5597
    }
5598
}
5599

    
5600
uint32_t helper_cc_compute_c(int op)
5601
{
5602
    switch (op) {
5603
    default: /* should never happen */ return 0;
5604

    
5605
    case CC_OP_EFLAGS: return compute_c_eflags();
5606

    
5607
    case CC_OP_MULB: return compute_c_mull();
5608
    case CC_OP_MULW: return compute_c_mull();
5609
    case CC_OP_MULL: return compute_c_mull();
5610

    
5611
    case CC_OP_ADDB: return compute_c_addb();
5612
    case CC_OP_ADDW: return compute_c_addw();
5613
    case CC_OP_ADDL: return compute_c_addl();
5614

    
5615
    case CC_OP_ADCB: return compute_c_adcb();
5616
    case CC_OP_ADCW: return compute_c_adcw();
5617
    case CC_OP_ADCL: return compute_c_adcl();
5618

    
5619
    case CC_OP_SUBB: return compute_c_subb();
5620
    case CC_OP_SUBW: return compute_c_subw();
5621
    case CC_OP_SUBL: return compute_c_subl();
5622

    
5623
    case CC_OP_SBBB: return compute_c_sbbb();
5624
    case CC_OP_SBBW: return compute_c_sbbw();
5625
    case CC_OP_SBBL: return compute_c_sbbl();
5626

    
5627
    case CC_OP_LOGICB: return compute_c_logicb();
5628
    case CC_OP_LOGICW: return compute_c_logicw();
5629
    case CC_OP_LOGICL: return compute_c_logicl();
5630

    
5631
    case CC_OP_INCB: return compute_c_incl();
5632
    case CC_OP_INCW: return compute_c_incl();
5633
    case CC_OP_INCL: return compute_c_incl();
5634

    
5635
    case CC_OP_DECB: return compute_c_incl();
5636
    case CC_OP_DECW: return compute_c_incl();
5637
    case CC_OP_DECL: return compute_c_incl();
5638

    
5639
    case CC_OP_SHLB: return compute_c_shlb();
5640
    case CC_OP_SHLW: return compute_c_shlw();
5641
    case CC_OP_SHLL: return compute_c_shll();
5642

    
5643
    case CC_OP_SARB: return compute_c_sarl();
5644
    case CC_OP_SARW: return compute_c_sarl();
5645
    case CC_OP_SARL: return compute_c_sarl();
5646

    
5647
#ifdef TARGET_X86_64
5648
    case CC_OP_MULQ: return compute_c_mull();
5649

    
5650
    case CC_OP_ADDQ: return compute_c_addq();
5651

    
5652
    case CC_OP_ADCQ: return compute_c_adcq();
5653

    
5654
    case CC_OP_SUBQ: return compute_c_subq();
5655

    
5656
    case CC_OP_SBBQ: return compute_c_sbbq();
5657

    
5658
    case CC_OP_LOGICQ: return compute_c_logicq();
5659

    
5660
    case CC_OP_INCQ: return compute_c_incl();
5661

    
5662
    case CC_OP_DECQ: return compute_c_incl();
5663

    
5664
    case CC_OP_SHLQ: return compute_c_shlq();
5665

    
5666
    case CC_OP_SARQ: return compute_c_sarl();
5667
#endif
5668
    }
5669
}