/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
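
/* Illustrative note: the table holds CC_P when the 8-bit index has an
 * even number of set bits, which is how x86 defines PF.  For example,
 * parity_table[0x03] (binary 00000011, two set bits) is CC_P, while
 * parity_table[0x07] (three set bits) is 0. */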

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
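
/* Illustrative note: RCL rotates through CF, so a 16-bit RCL is a
 * rotation of a 17-bit quantity (CF:operand) and the effective count
 * is the masked count modulo 17; e.g. rclw_table[18] == 1, so RCL by
 * 18 behaves like RCL by 1.  Likewise 8-bit RCL works modulo 9. */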

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
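
/* A sketch of the descriptor dword layout assumed by the helpers
 * below: e1 is the low dword (limit 15..0, base 15..0) and e2 the
 * high dword (base 23..16, type/S/DPL/P, limit 19..16, AVL/L/D-B/G,
 * base 31..24) of an 8-byte GDT/LDT descriptor. */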

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
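
/* Worked example: in a 32-bit TSS (shift == 1), dpl 0 gives
 * index == 4, so ESP0 is read from tss+4 and SS0 from tss+8; in a
 * 16-bit TSS (shift == 0), dpl 0 gives index == 2, i.e. SP0 at tss+2
 * and SS0 at tss+4. */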

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* reject code segments that are not readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if it is a task gate, read and load the referenced TSS segment */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the new
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can cause problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
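
/* For reference, the 32-bit TSS offsets used by switch_tss() above:
 * 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28..0x44 EAX..EDI, 0x48..0x5c
 * segment selectors, 0x60 LDT selector, 0x64 trap flag word (the
 * I/O map base at 0x66 is used by check_io() below). */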

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
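
/* Worked example: for a word access to port 0x3f9 (size 2), io_offset
 * is the I/O map base (read from tss+0x66) plus 0x3f9 >> 3 == 0x7f,
 * and bits 1..2 of the word read there must both be zero for the
 * access to be allowed. */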

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
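
/* Illustrative use of SET_ESP: with a 16-bit stack segment
 * (sp_mask == 0xffff), only SP is written and the upper 16 bits of
 * ESP are preserved, as 16-bit code requires. */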

/* On 64-bit machines the segment base plus stack pointer can overflow
 * 32 bits, so this segment addition macro is used to trim the sum to
 * 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
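
/* The inner-privilege 32-bit frame built above, from higher to lower
 * addresses: [GS, FS, DS, ES when coming from vm86,] old SS, old ESP,
 * EFLAGS, CS, EIP and, for exceptions that carry one, the error code. */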

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
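
/* For reference: the 64-bit TSS holds RSP0..RSP2 at offsets 4/12/20
 * and IST1..IST7 from offset 36 on, so get_rsp_from_tss(dpl) selects
 * RSPn while get_rsp_from_tss(ist + 3) selects ISTn. */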

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
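
/* For reference: SYSCALL takes the new CS selector from STAR[47:32]
 * (the '>> 32' above) with SS fixed at CS + 8; the target RIP comes
 * from LSTAR in 64-bit mode, CSTAR in compatibility mode, and from
 * the low 32 bits of STAR in legacy mode.  SYSRET below uses
 * STAR[63:48] instead. */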

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
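
/* For reference: each real-mode IVT entry is 4 bytes, IP at
 * vector * 4 and CS at vector * 4 + 2, which is what the two
 * lduw_kernel() calls above read. */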

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
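
/* Example of the rules above: a #GP (13) raised while delivering a
 * #NP (11) combines into a double fault (#DF, vector 8), and any
 * further fault while delivering #DF escalates to a triple fault,
 * i.e. a system reset. */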

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1517

    
1518
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

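/* unsigned byte divide: AX / t0 -> AL = quotient, AH = remainder;
   raises #DE on division by zero or quotient overflow */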
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM should raise #DE when its immediate divisor is 0 */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

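    /* CMPXCHG16B requires its memory operand to be 16-byte aligned */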
    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

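/* ENTER helper for nesting level > 0: copy the level-1 enclosing frame
   pointers from the old frame, then push the new frame pointer t1 */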
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

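/* LLDT/LTR: in long mode, system-segment descriptors are 16 bytes wide,
   so entry_limit grows to 15 and a third dword extends the base address */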
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

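        /* a call gate that leads to a more privileged non-conforming
           segment: switch to the inner stack taken from the TSS and
           copy param_count arguments from the caller's stack */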
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

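/* SYSENTER: CS is loaded from the SYSENTER_CS MSR and SS is implicitly
   SYSENTER_CS + 8; both become flat ring-0 segments */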
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

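/* SYSEXIT: the ring-3 CS/SS pair is derived from SYSENTER_CS
   (+16/+24 in 32-bit mode, +32/+40 for a 64-bit return) */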
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

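/* writes to DR0-DR3 and DR7 must keep QEMU's hardware breakpoint
   registration in sync with the guest's debug registers */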
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
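    /* the variable-range MTRRs are base/mask MSR pairs, so the
       mtrr_var[] index is half the MSR offset */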
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3134
            if ((offset & 0x3) != 0
3135
                || (val == 0 || val == ~(uint64_t)0))
3136
                env->mce_banks[offset] = val;
3137
            break;
3138
        }
3139
        /* XXX: exception ? */
3140
        break;
3141
    }
3142
}
3143

    
3144
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

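/* LSL/LAR/VERR/VERW below all share the same descriptor privilege
   checks: the selector must be non-null and loadable from the GDT/LDT,
   and for non-conforming segments DPL must be >= both CPL and RPL.  On
   success ZF is set (via CC_Z in the lazily computed flags) and
   LSL/LAR additionally return the limit/access rights; on failure ZF
   is cleared and 0 is returned. */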
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */
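
/* The x87 register stack is modelled with fpstt as the top-of-stack
   index into fpregs[] and a per-register fptags[] entry (1 = empty).
   ST0/ST(n) and the load helpers below address registers relative to
   fpstt modulo 8. */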
static inline double CPU86_LDouble_to_double(CPU86_LDouble a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx_to_float64(a, &env->fp_status);
    return u.d;
}

static inline CPU86_LDouble double_to_CPU86_LDouble(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx(u.f64, &env->fp_status);
}

static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (floatx_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx_div(a, b, &env->fp_status);
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

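/* floatx_compare() returns -1 (less), 0 (equal), 1 (greater) or 2
   (unordered); indexing with ret + 1 maps those results onto the x87
   condition codes: less sets C0, equal sets C3, greater clears all,
   unordered sets C0, C2 and C3.  fcomi_ccval does the same mapping
   onto the EFLAGS bits used by FCOMI. */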
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

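/* Bits 10-11 of the FPU control word select the rounding mode (RC)
   and bits 8-9 the rounding precision (32, 64 or 80-bit significand);
   both are forwarded to the softfloat status whenever FLDCW changes
   the control word. */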
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

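/* FBLD/FBST operate on an 80-bit packed BCD value: bytes 0-8 hold 18
   decimal digits, two per byte (low nibble = low digit), and bit 7 of
   byte 9 is the sign. */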
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        tmp = floatx_chs(tmp);
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    double val = CPU86_LDouble_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_CPU86_LDouble(val);
}

void helper_fyl2x(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0);    /* log2(ST) */
        fptemp *= CPU86_LDouble_to_double(ST1);
        ST1 = double_to_CPU86_LDouble(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_CPU86_LDouble(fptemp);
        fpush();
        ST0 = floatx_one;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = CPU86_LDouble_to_double(ST1);
    fptemp = CPU86_LDouble_to_double(ST0);
    ST1 = double_to_CPU86_LDouble(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;

    temp.d = ST0;

    if (floatx_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx_div(floatx_chs(floatx_one), floatx_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /* DP exponent bias */
        ST0 = int32_to_floatx(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}

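/* FPREM/FPREM1 compute a partial remainder: when the exponent
   difference between ST0 and ST1 is below 53 the reduction finishes
   in one step (C2 cleared, low quotient bits reported in C0/C3/C1);
   otherwise only a partial reduction is done and C2 is set so the
   caller is expected to loop.  FPREM truncates the quotient towards
   zero while FPREM1 rounds it to nearest, as the ISA requires. */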
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = CPU86_LDouble_to_double(ST0);
    st1 = CPU86_LDouble_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_CPU86_LDouble(st0);
}

void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = CPU86_LDouble_to_double(ST0);
    st1 = CPU86_LDouble_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_CPU86_LDouble(st0);
}

void helper_fyl2xp1(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        fptemp *= CPU86_LDouble_to_double(ST1);
        ST1 = double_to_CPU86_LDouble(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    if (floatx_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_CPU86_LDouble(sin(fptemp));
        fpush();
        ST0 = double_to_CPU86_LDouble(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    if (floatx_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_CPU86_LDouble(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = CPU86_LDouble_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_CPU86_LDouble(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

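/* FSTENV/FLDENV use a 14-byte (16-bit) or 28-byte (32-bit) layout:
   control word, status word, full 2-bit-per-register tag word, then
   instruction/operand pointers (stored as zero here).  QEMU keeps only
   an empty/valid bit per register, so the tag word is reconstructed by
   classifying each valid register as zero, special or normal. */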
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

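/* FXSAVE/FXRSTOR use the 512-byte extended save area: FPU control,
   status and (inverted, one-bit-per-register) tag words at the start,
   the eight x87 registers in 16-byte slots from offset 0x20, and the
   XMM registers from offset 0xa0.  With EFER.FFXSR set, CPL 0 code in
   long mode skips the XMM part (the "fast" variant). */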
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

#ifndef USE_X86LDOUBLE
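
/* Without USE_X86LDOUBLE the guest FPU registers are kept as 64-bit
   doubles, so the 80-bit extended values used in memory images have
   to be synthesised: the 52-bit mantissa is shifted up with an
   explicit integer bit and the exponent rebiased from 1023 to 16383
   (and back again in cpu_set_fp80). */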
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

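/* 128/64 -> 64 bit division, used by DIV/IDIV with 64-bit operands.
   The quotient is returned in *plow and the remainder in *phigh.  When
   the high part is non-zero, a plain shift-and-subtract loop produces
   one quotient bit per iteration; slow, but that case is rare. */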
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

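/* The VMCB stores segment attributes in the packed 12-bit AMD format:
   the low byte holds the type/S/DPL/P bits and the next nibble the
   AVL/L/DB/G bits.  svm_save_seg/svm_load_seg convert between that and
   the unpacked flags layout used by SegmentCache. */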
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

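/* VMRUN: save the host state into the hsave page, load the guest state
   from the VMCB pointed to by rAX, cache the intercept bitmaps in the
   CPU state so the VMCB need not be re-read while in SVM mode, and
   finally inject a pending event if control.event_inj is valid. */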
void helper_vmrun(int aflag, int next_eip_addend)
4955
{
4956
    target_ulong addr;
4957
    uint32_t event_inj;
4958
    uint32_t int_ctl;
4959

    
4960
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4961

    
4962
    if (aflag == 2)
4963
        addr = EAX;
4964
    else
4965
        addr = (uint32_t)EAX;
4966

    
4967
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4968

    
4969
    env->vm_vmcb = addr;
4970

    
4971
    /* save the current CPU state in the hsave page */
4972
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4973
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4974

    
4975
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4976
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4977

    
4978
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4979
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4980
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4981
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4982
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4983
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4984

    
4985
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4986
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4987

    
4988
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es), 
4989
                  &env->segs[R_ES]);
4990
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs), 
4991
                 &env->segs[R_CS]);
4992
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss), 
4993
                 &env->segs[R_SS]);
4994
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds), 
4995
                 &env->segs[R_DS]);
4996

    
4997
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4998
             EIP + next_eip_addend);
4999
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
5000
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
5001

    
5002
    /* load the interception bitmaps so we do not need to access the
5003
       vmcb in svm mode */
5004
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
5005
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5006
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
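
/*
 * For orientation (a hand-worked decode based on the SVM_EVTINJ_* field
 * layout used above, not an extra code path): event_inj = 0x80000041 has
 * bit 31 (SVM_EVTINJ_VALID) set, vector 0x41 in bits 0-7 and type 0
 * (SVM_EVTINJ_TYPE_INTR) in bits 8-10, so vmrun would deliver external
 * interrupt vector 0x41 before resuming the guest.
 */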

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
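
/*
 * Note (added for clarity): when the VMMCALL intercept is not active the
 * check above falls through, and raising #UD (EXCP06_ILLOP) matches the
 * architected behaviour of VMMCALL executed without an intercept in place.
 */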

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
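
/*
 * vmsave below is the mirror image of vmload: between them they transfer
 * the state that vmrun/#VMEXIT deliberately leave untouched (FS, GS, TR,
 * LDTR plus the SYSCALL/SYSENTER MSRs), with rAX supplying the physical
 * address of the VMCB in both directions.
 */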

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
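
/*
 * stgi/clgi toggle the global interrupt flag (GIF), tracked here as
 * HF2_GIF_MASK; while it is clear the execution loop is expected to hold
 * off interrupt and NMI delivery, which is how a hypervisor protects its
 * own world switch.
 */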

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
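
/*
 * Hand-worked MSR permission map lookup (illustrative only): a rdmsr of
 * EFER has ECX = 0xc0000080, which falls in the second range above, so
 * t0 = (8192 + 0x80) * 2 = 16640 bits, i.e. byte t1 = 2080 with bit
 * offset t0 = 0; param is 0 for a read and 1 for a write, selecting the
 * read or write bit of the two-bit pair kept per MSR.
 */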

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
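
/*
 * Hand-worked IOPM lookup (illustrative only): for a 16-bit access the
 * one-hot size bits give ((param >> 4) & 7) == 2, hence
 * mask = (1 << 2) - 1 = 3, and an access to port 0x3f8 tests bits 0-1 of
 * the I/O permission map word fetched at byte offset 0x3f8 / 8 = 127.
 */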
5320

    
5321
/* Note: currently only 32 bits of exit_code are used */
5322
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5323
{
5324
    uint32_t int_ctl;
5325

    
5326
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5327
                exit_code, exit_info_1,
5328
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5329
                EIP);
5330

    
5331
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5332
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5333
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5334
    } else {
5335
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5336
    }
5337

    
5338
    /* Save the VM state in the vmcb */
5339
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es), 
5340
                 &env->segs[R_ES]);
5341
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs), 
5342
                 &env->segs[R_CS]);
5343
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss), 
5344
                 &env->segs[R_SS]);
5345
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds), 
5346
                 &env->segs[R_DS]);
5347

    
5348
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5349
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5350

    
5351
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5352
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5353

    
5354
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5355
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5356
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5357
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5358
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5359

    
5360
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5361
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5362
    int_ctl |= env->v_tpr & V_TPR_MASK;
5363
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5364
        int_ctl |= V_IRQ_MASK;
5365
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5366

    
5367
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5368
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5369
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5370
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5371
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5372
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5373
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5374

    
5375
    /* Reload the host state from vm_hsave */
5376
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5377
    env->hflags &= ~HF_SVMI_MASK;
5378
    env->intercept = 0;
5379
    env->intercept_exceptions = 0;
5380
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5381
    env->tsc_offset = 0;
5382

    
5383
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5384
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5385

    
5386
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5387
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5388

    
5389
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5390
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5391
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5392
    /* we need to set the efer after the crs so the hidden flags get
5393
       set properly */
5394
    cpu_load_efer(env, 
5395
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5396
    env->eflags = 0;
5397
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5398
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5399
    CC_OP = CC_OP_EFLAGS;
5400

    
5401
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5402
                       env, R_ES);
5403
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5404
                       env, R_CS);
5405
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5406
                       env, R_SS);
5407
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5408
                       env, R_DS);
5409

    
5410
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5411
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5412
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5413

    
5414
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5415
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5416

    
5417
    /* other setups */
5418
    cpu_x86_set_cpl(env, 0);
5419
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5420
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5421

    
5422
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5423
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5424
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5425
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5426
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5427

    
5428
    env->hflags2 &= ~HF2_GIF_MASK;
5429
    /* FIXME: Resets the current ASID register to zero (host ASID). */
5430

    
5431
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5432

    
5433
    /* Clears the TSC_OFFSET inside the processor. */
5434

    
5435
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
5436
       from the page table indicated the host's CR3. If the PDPEs contain
5437
       illegal state, the processor causes a shutdown. */
5438

    
5439
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5440
    env->cr[0] |= CR0_PE_MASK;
5441
    env->eflags &= ~VM_MASK;
5442

    
5443
    /* Disables all breakpoints in the host DR7 register. */
5444

    
5445
    /* Checks the reloaded host state for consistency. */
5446

    
5447
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5448
       host's code segment or non-canonical (in the case of long mode), a
5449
       #GP fault is delivered inside the host.) */
5450

    
5451
    /* remove any pending exception */
5452
    env->exception_index = -1;
5453
    env->error_code = 0;
5454
    env->old_exception = -1;
5455

    
5456
    cpu_loop_exit();
5457
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
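
/*
 * Note on the two helpers above: env->fptags keeps one byte per x87
 * register (0 = valid, 1 = empty), so the paired 32-bit stores mark all
 * eight registers valid on entry to MMX mode (0) or empty again for EMMS
 * (0x01010101) in just two writes.
 */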

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
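/*
 * Note (added for clarity): the scan below never terminates for t0 == 0;
 * the translator is expected to invoke it only with a nonzero operand,
 * since BSF with a zero source sets ZF and leaves the destination
 * undefined.
 */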
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}
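
/*
 * Worked example (illustrative): for t0 = 0x10 the scan above leaves
 * count = 4, the index of the highest set bit; with wordsize = 16 the
 * helper returns 16 - 1 - 4 = 11 leading zeros, while wordsize = 0 (the
 * helper_bsr case below) returns the bit index 4 itself.
 */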

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
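
/*
 * The two dispatchers below implement the lazy condition-code scheme:
 * rather than computing EFLAGS after every instruction, the translator
 * records the last operation in CC_OP together with its operands in
 * CC_SRC/CC_DST, and the compute_{all,c}_* routines expanded from
 * helper_template.h above reconstruct all flags, or just CF, on demand.
 */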

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}
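
/*
 * Several cases above deliberately share one routine: the multiply
 * helpers keep the high half of the result in CC_SRC whatever the width,
 * so compute_c_mull serves all of them; INC/DEC leave CF unchanged (the
 * old value is saved in CC_SRC), and SAR keeps the shifted-out CF in bit
 * 0 of CC_SRC, so compute_c_incl and compute_c_sarl likewise cover every
 * size, assuming the translator's usual CC_SRC conventions.
 */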