/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "cpu.h"
#include "dyngen-exec.h"
#include "host-utils.h"
#include "ioport.h"
#include "qemu-common.h"
#include "qemu-log.h"
#include "cpu-defs.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}

#define RC_MASK         0xc00
#define RC_NEAR         0x000
#define RC_DOWN         0x400
#define RC_UP           0x800
#define RC_CHOP         0xc00

#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPBIAS 16383
#define EXPD(fp)        (fp.l.upper & 0x7fff)
#define SIGND(fp)       ((fp.l.upper) & 0x8000)
#define MANTD(fp)       (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS

static inline void fpush(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */
}

static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}

static inline floatx80 helper_fldt(target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);
    return temp.d;
}

static inline void helper_fstt(floatx80 f, target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.d = f;
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);
}

#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B  (1 << 15)

#define FPUC_EM 0x3f

static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}

/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}

/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUState *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code);

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
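
/* parity_table[b] is CC_P when the byte b has an even number of set
   bits, matching the x86 definition of PF: e.g. parity_table[0x03]
   (two bits set) is CC_P while parity_table[0x01] is 0. */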

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
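
/* RCL rotates through CF, so a 16 bit rotate has 17 bit positions and
   an 8 bit rotate has 9: the tables above reduce the 5 bit shift count
   modulo 17 (rclw) or modulo 9 (rclb) without a runtime division. */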

#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
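
/* A descriptor is handled as two little-endian words e1 (bytes 0-3)
   and e2 (bytes 4-7): the 32 bit base is scattered over e1 bits 16-31
   and e2 bits 0-7 and 24-31, and the 20 bit limit over e1 bits 0-15
   and e2 bits 16-19, scaled by 4K when the granularity bit is set. */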

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
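
/* Note that in vm86 mode there are no descriptors: the base is always
   selector * 16, the limit is 64K and no access rights are checked,
   which is exactly what the cache load above encodes. */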

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
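
/* In a 32 bit TSS the ring n stack pointers live at offset 4 + 8 * n
   (esp) and 8 + 8 * n (ss); a 16 bit TSS packs the sp/ss pairs at
   offset 2 + 4 * n. The shifted index above covers both layouts. */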

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
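
/* Worked example, assuming an I/O bitmap at offset 0x68 in the TSS:
   an inb from port 0x3f8 fetches the 16 bit word at
   tr.base + 0x68 + (0x3f8 >> 3) and tests bit 0x3f8 & 7; any set bit
   covering the access raises #GP(0). */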

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
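
/* The cases above are #DF (8), #TS (10), #NP (11), #SS (12), #GP (13),
   #PF (14) and #AC (17), the only x86 exceptions that push an error
   code on the stack. */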

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
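
/* SET_ESP leaves the bits outside the stack size untouched: a 16 bit
   stack only updates SP, a 32 bit stack zero extends the value into
   RSP (as the hardware does), and a 64 bit stack takes it whole. */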

/* in 64-bit machines, this can overflow, so this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
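/* The frame pushed below contains, from higher to lower addresses:
   the old SS:ESP (only on a privilege change, preceded by GS, FS, DS
   and ES when coming from vm86), then EFLAGS, CS, EIP and finally the
   error code if the exception defines one. */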
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
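
/* In the 64 bit TSS, RSPn for ring n sits at offset 4 + 8 * n and
   ISTn at offset 28 + 8 * n, hence the single "8 * level + 4" formula
   with IST callers passing ist + 3. */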

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#else

static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                  offsetof(struct vmcb, control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb +
                     offsetof(struct vmcb, control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUState *env1)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}

void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}

/* shortcuts to generate exceptions */

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception_err_env(CPUState *nenv, int exception_index,
                             int error_code)
{
    env = nenv;
    raise_interrupt(exception_index, 0, error_code, 0);
}

static void QEMU_NORETURN raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
1513
/* SMM support */
1514

    
1515
#if defined(CONFIG_USER_ONLY)
1516

    
1517
void do_smm_enter(CPUState *env1)
1518
{
1519
}
1520

    
1521
void helper_rsm(void)
1522
{
1523
}
1524

    
1525
#else
1526

    
1527
#ifdef TARGET_X86_64
1528
#define SMM_REVISION_ID 0x00020064
1529
#else
1530
#define SMM_REVISION_ID 0x00020000
1531
#endif
1532

    
1533
void do_smm_enter(CPUState *env1)
1534
{
1535
    target_ulong sm_state;
1536
    SegmentCache *dt;
1537
    int i, offset;
1538
    CPUState *saved_env;
1539

    
1540
    saved_env = env;
1541
    env = env1;
1542

    
1543
    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1544
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1545

    
1546
    env->hflags |= HF_SMM_MASK;
1547
    cpu_smm_update(env);
1548

    
1549
    sm_state = env->smbase + 0x8000;
1550

    
1551
#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

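    /* Bit 17 (0x20000) of the SMM revision ID advertises SMBASE
       relocation support; smbase is reloaded only when the saved image
       has it set. */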
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */
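/* Note: both a zero divisor and a quotient too large for the
   destination raise the same #DE fault (vector 0), which is why
   EXCP00_DIVZ is reused for the overflow checks below. */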

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: missing exception: AAM should raise #DE when the immediate
   divisor is zero */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store: CMPXCHG8B performs a write even when the
           compare fails, so faulting behavior matches the success path */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

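/* ENTER with a non-zero nesting level copies the level-1 saved frame
   pointers from the old frame onto the new stack and then pushes the
   new frame pointer t1; the translated code is expected to adjust
   ESP/EBP itself afterwards. */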
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

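/* In long mode, LDT and TSS descriptors are 16 bytes instead of 8,
   which is why helper_lldt()/helper_ltr() check the descriptor table
   limit against an entry_limit of 15 rather than 7. */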
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
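/* A null selector may be loaded into any data segment register; SS is
   the exception, where it faults except in 64-bit mode at CPL != 3
   (see the check below). */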
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

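        /* A call gate into a more privileged, non-conforming code
           segment switches to the inner stack fetched from the TSS and
           copies param_count (d)words of arguments from the caller's
           stack onto it. */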
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

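/* SYSENTER enters CPL 0 with flat segments derived from
   IA32_SYSENTER_CS: CS.SEL = MSR value, SS.SEL = CS.SEL + 8; EIP and
   ESP come from the IA32_SYSENTER_EIP/ESP MSRs. */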
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

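/* SYSEXIT returns to CPL 3 with flat segments at fixed offsets from
   IA32_SYSENTER_CS: CS at +16 and SS at +24 for a legacy return, CS at
   +32 and SS at +40 for a 64-bit return; EIP and ESP are taken from
   EDX and ECX. */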
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}

#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
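/* CR8 is an alias of the local APIC task priority register.  When the
   guest runs with SVM virtual interrupt masking (HF2_VINTR_MASK set),
   accesses are redirected to the shadow v_tpr instead. */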
target_ulong helper_read_crN(int reg)
3039
{
3040
    target_ulong val;
3041

    
3042
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3043
    switch(reg) {
3044
    default:
3045
        val = env->cr[reg];
3046
        break;
3047
    case 8:
3048
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3049
            val = cpu_get_apic_tpr(env->apic_state);
3050
        } else {
3051
            val = env->v_tpr;
3052
        }
3053
        break;
3054
    }
3055
    return val;
3056
}
3057

    
3058
void helper_write_crN(int reg, target_ulong t0)
3059
{
3060
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3061
    switch(reg) {
3062
    case 0:
3063
        cpu_x86_update_cr0(env, t0);
3064
        break;
3065
    case 3:
3066
        cpu_x86_update_cr3(env, t0);
3067
        break;
3068
    case 4:
3069
        cpu_x86_update_cr4(env, t0);
3070
        break;
3071
    case 8:
3072
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3073
            cpu_set_apic_tpr(env->apic_state, t0);
3074
        }
3075
        env->v_tpr = t0 & 0x0f;
3076
        break;
3077
    default:
3078
        env->cr[reg] = t0;
3079
        break;
3080
    }
3081
}
3082

    
3083
void helper_movl_drN_T0(int reg, target_ulong t0)
3084
{
3085
    int i;
3086

    
3087
    if (reg < 4) {
3088
        hw_breakpoint_remove(env, reg);
3089
        env->dr[reg] = t0;
3090
        hw_breakpoint_insert(env, reg);
3091
    } else if (reg == 7) {
3092
        for (i = 0; i < 4; i++)
3093
            hw_breakpoint_remove(env, i);
3094
        env->dr[7] = t0;
3095
        for (i = 0; i < 4; i++)
3096
            hw_breakpoint_insert(env, i);
3097
    } else
3098
        env->dr[reg] = t0;
3099
}
3100
#endif
3101

    
3102
void helper_lmsw(target_ulong t0)
3103
{
3104
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3105
       if already set to one. */
3106
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3107
    helper_write_crN(0, t0);
3108
}
3109

    
3110
void helper_clts(void)
3111
{
3112
    env->cr[0] &= ~CR0_TS_MASK;
3113
    env->hflags &= ~HF_TS_MASK;
3114
}
3115

    
3116
void helper_invlpg(target_ulong addr)
3117
{
3118
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3119
    tlb_flush_page(env, addr);
3120
}
3121

    
3122
void helper_rdtsc(void)
3123
{
3124
    uint64_t val;
3125

    
3126
    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3127
        raise_exception(EXCP0D_GPF);
3128
    }
3129
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3130

    
3131
    val = cpu_get_tsc(env) + env->tsc_offset;
3132
    EAX = (uint32_t)(val);
3133
    EDX = (uint32_t)(val >> 32);
3134
}
3135

    
3136
void helper_rdtscp(void)
3137
{
3138
    helper_rdtsc();
3139
    ECX = (uint32_t)(env->tsc_aux);
3140
}
3141

    
3142
void helper_rdpmc(void)
3143
{
3144
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3145
        raise_exception(EXCP0D_GPF);
3146
    }
3147
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3148
    
3149
    /* currently unimplemented */
3150
    raise_exception_err(EXCP06_ILLOP, 0);
3151
}
3152

    
3153
#if defined(CONFIG_USER_ONLY)
3154
void helper_wrmsr(void)
3155
{
3156
}
3157

    
3158
void helper_rdmsr(void)
3159
{
3160
}
3161
#else
3162
void helper_wrmsr(void)
3163
{
3164
    uint64_t val;
3165

    
3166
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3167

    
3168
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3169

    
3170
    switch((uint32_t)ECX) {
3171
    case MSR_IA32_SYSENTER_CS:
3172
        env->sysenter_cs = val & 0xffff;
3173
        break;
3174
    case MSR_IA32_SYSENTER_ESP:
3175
        env->sysenter_esp = val;
3176
        break;
3177
    case MSR_IA32_SYSENTER_EIP:
3178
        env->sysenter_eip = val;
3179
        break;
3180
    case MSR_IA32_APICBASE:
3181
        cpu_set_apic_base(env->apic_state, val);
3182
        break;
3183
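    /* Only the EFER bits that are backed by a CPUID feature flag may be
       changed by the guest; all other EFER bits keep their current value. */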
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
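    /* Variable-range MTRRs come as (physBase, physMask) MSR pairs, so the
       MSR offset divided by two selects the range register. */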
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
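    /* Remaining writable MSRs are the machine-check banks: four MSRs
       (CTL, STATUS, ADDR, MISC) per bank, bank count in MCG_CAP[7:0]. */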
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif

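/* LSL: on success the segment limit is returned and ZF is set; any
   failed check only clears ZF, no fault is raised. */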
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */

static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}

static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}

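/* Set exception bits in the FPU status word; if any of them is unmasked
   in the control word, also set the error summary and busy bits. */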
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

/* FPU move */

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    floatx80 tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

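/* Propagate the rounding control field (FPUC bits 10-11) and the
   precision control field (FPUC bits 8-9: 0 = single, 2 = double,
   3 = extended) to the softfloat status. */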
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

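/* The FBLD/FBST operand is 9 bytes of packed BCD, two decimal digits per
   byte and least significant byte first, followed by a sign byte (bit 7
   set for negative). */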
void helper_fbld_ST0(target_ulong ptr)
{
    floatx80 tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx80(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        tmp = floatx80_chs(tmp);
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    double val = floatx80_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_floatx80(val);
}

void helper_fyl2x(void)
{
    double fptemp = floatx80_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /* DP exponent bias */
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}

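/* FPREM1/FPREM set C2 when only a partial reduction was done; on a
   complete reduction the three low quotient bits are reported in the
   C0, C3 and C1 condition flags. */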
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
       ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}

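/* FSCALE: scale ST0 by 2^n, where n is ST1 truncated towards zero. */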
void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

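/* The environment's tag word holds two bits per register: 00 = valid,
   01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty. */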
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
#endif

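/* FXSAVE uses a 512-byte area: control/status words at the start, the
   eight FP registers in 16-byte slots from offset 0x20 and the XMM
   registers from offset 0xa0. */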
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

4736
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4737
{
4738
    uint64_t q, r, a1, a0;
4739
    int i, qb, ab;
4740

    
4741
    a0 = *plow;
4742
    a1 = *phigh;
4743
    if (a1 == 0) {
4744
        q = a0 / b;
4745
        r = a0 % b;
4746
        *plow = q;
4747
        *phigh = r;
4748
    } else {
4749
        if (a1 >= b)
4750
            return 1;
4751
        /* XXX: use a better algorithm */
4752
        for(i = 0; i < 64; i++) {
4753
            ab = a1 >> 63;
4754
            a1 = (a1 << 1) | (a0 >> 63);
4755
            if (ab || a1 >= b) {
4756
                a1 -= b;
4757
                qb = 1;
4758
            } else {
4759
                qb = 0;
4760
            }
4761
            a0 = (a0 << 1) | qb;
4762
        }
4763
#if defined(DEBUG_MULDIV)
4764
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4765
               *phigh, *plow, b, a0, a1);
4766
#endif
4767
        *plow = a0;
4768
        *phigh = a1;
4769
    }
4770
    return 0;
4771
}
4772

    
4773
/* return TRUE if overflow */
4774
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4775
{
4776
    int sa, sb;
4777
    sa = ((int64_t)*phigh < 0);
4778
    if (sa)
4779
        neg128(plow, phigh);
4780
    sb = (b < 0);
4781
    if (sb)
4782
        b = -b;
4783
    if (div64(plow, phigh, b) != 0)
4784
        return 1;
4785
    if (sa ^ sb) {
4786
        if (*plow > (1ULL << 63))
4787
            return 1;
4788
        *plow = - *plow;
4789
    } else {
4790
        if (*plow >= (1ULL << 63))
4791
            return 1;
4792
    }
4793
    if (sa)
4794
        *phigh = - *phigh;
4795
    return 0;
4796
}
4797

    
4798
void helper_mulq_EAX_T0(target_ulong t0)
4799
{
4800
    uint64_t r0, r1;
4801

    
4802
    mulu64(&r0, &r1, EAX, t0);
4803
    EAX = r0;
4804
    EDX = r1;
4805
    CC_DST = r0;
4806
    CC_SRC = r1;
4807
}
4808

    
4809
void helper_imulq_EAX_T0(target_ulong t0)
4810
{
4811
    uint64_t r0, r1;
4812

    
4813
    muls64(&r0, &r1, EAX, t0);
4814
    EAX = r0;
4815
    EDX = r1;
4816
    CC_DST = r0;
4817
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4818
}
4819

    
4820
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4821
{
4822
    uint64_t r0, r1;
4823

    
4824
    muls64(&r0, &r1, t0, t1);
4825
    CC_DST = r0;
4826
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4827
    return r0;
4828
}
4829

    
4830
void helper_divq_EAX(target_ulong t0)
4831
{
4832
    uint64_t r0, r1;
4833
    if (t0 == 0) {
4834
        raise_exception(EXCP00_DIVZ);
4835
    }
4836
    r0 = EAX;
4837
    r1 = EDX;
4838
    if (div64(&r0, &r1, t0))
4839
        raise_exception(EXCP00_DIVZ);
4840
    EAX = r0;
4841
    EDX = r1;
4842
}
4843

    
4844
void helper_idivq_EAX(target_ulong t0)
4845
{
4846
    uint64_t r0, r1;
4847
    if (t0 == 0) {
4848
        raise_exception(EXCP00_DIVZ);
4849
    }
4850
    r0 = EAX;
4851
    r1 = EDX;
4852
    if (idiv64(&r0, &r1, t0))
4853
        raise_exception(EXCP00_DIVZ);
4854
    EAX = r0;
4855
    EDX = r1;
4856
}
4857
#endif
4858

    
4859
static void do_hlt(void)
4860
{
4861
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4862
    env->halted = 1;
4863
    env->exception_index = EXCP_HLT;
4864
    cpu_loop_exit(env);
4865
}
4866

    
4867
void helper_hlt(int next_eip_addend)
4868
{
4869
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4870
    EIP += next_eip_addend;
4871
    
4872
    do_hlt();
4873
}
4874

    
4875
void helper_monitor(target_ulong ptr)
4876
{
4877
    if ((uint32_t)ECX != 0)
4878
        raise_exception(EXCP0D_GPF);
4879
    /* XXX: store address ? */
4880
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4881
}
4882

    
4883
void helper_mwait(int next_eip_addend)
4884
{
4885
    if ((uint32_t)ECX != 0)
4886
        raise_exception(EXCP0D_GPF);
4887
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4888
    EIP += next_eip_addend;
4889

    
4890
    /* XXX: not complete but not completely erroneous */
4891
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
4892
        /* more than one CPU: do not sleep because another CPU may
4893
           wake this one */
4894
    } else {
4895
        do_hlt();
4896
    }
4897
}
4898

    
4899
void helper_debug(void)
4900
{
4901
    env->exception_index = EXCP_DEBUG;
4902
    cpu_loop_exit(env);
4903
}
4904

    
4905
void helper_reset_rf(void)
4906
{
4907
    env->eflags &= ~RF_MASK;
4908
}
4909

    
4910
void helper_raise_interrupt(int intno, int next_eip_addend)
4911
{
4912
    raise_interrupt(intno, 1, 0, next_eip_addend);
4913
}
4914

    
4915
void helper_raise_exception(int exception_index)
4916
{
4917
    raise_exception(exception_index);
4918
}
4919

    
4920
void helper_cli(void)
4921
{
4922
    env->eflags &= ~IF_MASK;
4923
}
4924

    
4925
void helper_sti(void)
4926
{
4927
    env->eflags |= IF_MASK;
4928
}
4929

    
4930
#if 0
4931
/* vm86plus instructions */
4932
void helper_cli_vm(void)
4933
{
4934
    env->eflags &= ~VIF_MASK;
4935
}
4936

4937
void helper_sti_vm(void)
4938
{
4939
    env->eflags |= VIF_MASK;
4940
    if (env->eflags & VIP_MASK) {
4941
        raise_exception(EXCP0D_GPF);
4942
    }
4943
}
4944
#endif
4945

    
4946
void helper_set_inhibit_irq(void)
4947
{
4948
    env->hflags |= HF_INHIBIT_IRQ_MASK;
4949
}
4950

    
4951
void helper_reset_inhibit_irq(void)
4952
{
4953
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4954
}
4955

    
4956
void helper_boundw(target_ulong a0, int v)
4957
{
4958
    int low, high;
4959
    low = ldsw(a0);
4960
    high = ldsw(a0 + 2);
4961
    v = (int16_t)v;
4962
    if (v < low || v > high) {
4963
        raise_exception(EXCP05_BOUND);
4964
    }
4965
}
4966

    
4967
void helper_boundl(target_ulong a0, int v)
4968
{
4969
    int low, high;
4970
    low = ldl(a0);
4971
    high = ldl(a0 + 4);
4972
    if (v < low || v > high) {
4973
        raise_exception(EXCP05_BOUND);
4974
    }
4975
}
4976

    
4977
#if !defined(CONFIG_USER_ONLY)
4978

    
4979
#define MMUSUFFIX _mmu
4980

    
4981
#define SHIFT 0
4982
#include "softmmu_template.h"
4983

    
4984
#define SHIFT 1
4985
#include "softmmu_template.h"
4986

    
4987
#define SHIFT 2
4988
#include "softmmu_template.h"
4989

    
4990
#define SHIFT 3
4991
#include "softmmu_template.h"
4992

    
4993
#endif
4994

    
4995
#if !defined(CONFIG_USER_ONLY)
4996
/* try to fill the TLB and return an exception if error. If retaddr is
4997
   NULL, it means that the function was called in C code (i.e. not
4998
   from generated code or from helper.c) */
4999
/* XXX: fix it to restore all registers */
5000
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
5001
{
5002
    TranslationBlock *tb;
5003
    int ret;
5004
    unsigned long pc;
5005
    CPUX86State *saved_env;
5006

    
5007
    /* XXX: hack to restore env in all cases, even if not called from
5008
       generated code */
5009
    saved_env = env;
5010
    env = cpu_single_env;
5011

    
5012
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
5013
    if (ret) {
5014
        if (retaddr) {
5015
            /* now we have a real cpu fault */
5016
            pc = (unsigned long)retaddr;
5017
            tb = tb_find_pc(pc);
5018
            if (tb) {
5019
                /* the PC is inside the translated code. It means that we have
5020
                   a virtual CPU fault */
5021
                cpu_restore_state(tb, env, pc);
5022
            }
5023
        }
5024
        raise_exception_err(env->exception_index, env->error_code);
5025
    }
5026
    env = saved_env;
5027
}
5028
#endif
5029

    
5030
/* Secure Virtual Machine helpers */
5031

    
5032
#if defined(CONFIG_USER_ONLY)
5033

    
5034
void helper_vmrun(int aflag, int next_eip_addend)
5035
{ 
5036
}
5037
void helper_vmmcall(void) 
5038
{ 
5039
}
5040
void helper_vmload(int aflag)
5041
{ 
5042
}
5043
void helper_vmsave(int aflag)
5044
{ 
5045
}
5046
void helper_stgi(void)
5047
{
5048
}
5049
void helper_clgi(void)
5050
{
5051
}
5052
void helper_skinit(void) 
5053
{ 
5054
}
5055
void helper_invlpga(int aflag)
5056
{ 
5057
}
5058
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1) 
5059
{ 
5060
}
5061
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5062
{
5063
}
5064

    
5065
void svm_check_intercept(CPUState *env1, uint32_t type)
5066
{
5067
}
5068

    
5069
void helper_svm_check_io(uint32_t port, uint32_t param, 
5070
                         uint32_t next_eip_addend)
5071
{
5072
}
5073
#else
5074

    
5075
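/* The VMCB stores segment attributes in compressed form: descriptor
   bits 8-15 in the low byte and bits 20-23 (AVL/L/D-B/G) in bits 8-11. */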
static inline void svm_save_seg(target_phys_addr_t addr,
5076
                                const SegmentCache *sc)
5077
{
5078
    stw_phys(addr + offsetof(struct vmcb_seg, selector), 
5079
             sc->selector);
5080
    stq_phys(addr + offsetof(struct vmcb_seg, base), 
5081
             sc->base);
5082
    stl_phys(addr + offsetof(struct vmcb_seg, limit), 
5083
             sc->limit);
5084
    stw_phys(addr + offsetof(struct vmcb_seg, attrib), 
5085
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
5086
}
5087
                                
5088
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
5089
{
5090
    unsigned int flags;
5091

    
5092
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
5093
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
5094
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
5095
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
5096
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
5097
}
5098

    
5099
static inline void svm_load_seg_cache(target_phys_addr_t addr, 
5100
                                      CPUState *env, int seg_reg)
5101
{
5102
    SegmentCache sc1, *sc = &sc1;
5103
    svm_load_seg(addr, sc);
5104
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
5105
                           sc->base, sc->limit, sc->flags);
5106
}
5107

    
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is this always correct? */
                do_interrupt_all(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit(env);
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

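/* VMLOAD/VMSAVE transfer the state that VMRUN and #VMEXIT leave
   alone: FS, GS, TR and LDTR including their hidden descriptor state,
   plus the SYSCALL/SYSENTER MSRs (and KernelGSBase on x86_64).  */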
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

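/* Check one intercept against the bitmaps cached at VMRUN time and
   raise a #VMEXIT if it is active; a no-op unless the CPU is
   currently running a guest (HF_SVMI set).  */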
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
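    /* The MSR permission map holds two bits per MSR and is split into
       2K regions covering the MSR ranges 0-0x1fff, 0xc0000000-0xc0001fff
       and 0xc0010000-0xc0011fff.  Below, t1 becomes the byte offset into
       the map and t0 the bit offset of the MSR's read-intercept bit;
       param selects the read (0) or write (1) bit.  */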
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void svm_check_intercept(CPUState *env1, uint32_t type)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}

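/* The I/O permission map has one intercept bit per port.  Bits 4-6 of
   param (the IOIO exit information) give the access size in bytes, so
   the mask below covers every port byte touched by the access.  */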
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
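/* #VMEXIT: write the guest state and exit information back into the
   VMCB, restore the host state saved by VMRUN from the hsave area and
   resume execution in the host right after the VMRUN instruction.  */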
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fpstt and fptags in the static cpu state */
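/* The MMX registers alias the 64-bit mantissas of the FPU data
   registers, so entering MMX mode resets the stack top and marks all
   eight tags valid, while EMMS marks them all empty again (fptags
   uses 0 for valid, 1 for empty).  */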
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
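/* ops_sse.h and helper_template.h expand to a family of helpers each
   time they are included; SHIFT selects the variant: MMX (0) or SSE
   (1) for ops_sse.h, and log2 of the operand size in bytes for
   helper_template.h.  */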
#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
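/* Note: the loop below never terminates for t0 == 0; callers (the
   translator) are expected to test for a zero source and skip the
   helper in that case.  */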
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
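/* Shared by LZCNT and BSR: with wordsize > 0 the result is the number
   of leading zero bits within an operand of that width (wordsize when
   t0 is zero); with wordsize == 0 it is the bit index of the highest
   set bit, as BSR needs, and t0 must then be non-zero.  */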
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
5713

    
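/* Lazy condition code evaluation: the translator records the last
   flag-setting operation in CC_OP together with its operands in
   CC_SRC/CC_DST, and the helpers below reconstruct either the whole
   flag set or just CF when a flag is actually needed.  */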
5714
static int compute_all_eflags(void)
5715
{
5716
    return CC_SRC;
5717
}
5718

    
5719
static int compute_c_eflags(void)
5720
{
5721
    return CC_SRC & CC_C;
5722
}
5723

    
5724
uint32_t helper_cc_compute_all(int op)
5725
{
5726
    switch (op) {
5727
    default: /* should never happen */ return 0;
5728

    
5729
    case CC_OP_EFLAGS: return compute_all_eflags();
5730

    
5731
    case CC_OP_MULB: return compute_all_mulb();
5732
    case CC_OP_MULW: return compute_all_mulw();
5733
    case CC_OP_MULL: return compute_all_mull();
5734

    
5735
    case CC_OP_ADDB: return compute_all_addb();
5736
    case CC_OP_ADDW: return compute_all_addw();
5737
    case CC_OP_ADDL: return compute_all_addl();
5738

    
5739
    case CC_OP_ADCB: return compute_all_adcb();
5740
    case CC_OP_ADCW: return compute_all_adcw();
5741
    case CC_OP_ADCL: return compute_all_adcl();
5742

    
5743
    case CC_OP_SUBB: return compute_all_subb();
5744
    case CC_OP_SUBW: return compute_all_subw();
5745
    case CC_OP_SUBL: return compute_all_subl();
5746

    
5747
    case CC_OP_SBBB: return compute_all_sbbb();
5748
    case CC_OP_SBBW: return compute_all_sbbw();
5749
    case CC_OP_SBBL: return compute_all_sbbl();
5750

    
5751
    case CC_OP_LOGICB: return compute_all_logicb();
5752
    case CC_OP_LOGICW: return compute_all_logicw();
5753
    case CC_OP_LOGICL: return compute_all_logicl();
5754

    
5755
    case CC_OP_INCB: return compute_all_incb();
5756
    case CC_OP_INCW: return compute_all_incw();
5757
    case CC_OP_INCL: return compute_all_incl();
5758

    
5759
    case CC_OP_DECB: return compute_all_decb();
5760
    case CC_OP_DECW: return compute_all_decw();
5761
    case CC_OP_DECL: return compute_all_decl();
5762

    
5763
    case CC_OP_SHLB: return compute_all_shlb();
5764
    case CC_OP_SHLW: return compute_all_shlw();
5765
    case CC_OP_SHLL: return compute_all_shll();
5766

    
5767
    case CC_OP_SARB: return compute_all_sarb();
5768
    case CC_OP_SARW: return compute_all_sarw();
5769
    case CC_OP_SARL: return compute_all_sarl();
5770

    
5771
#ifdef TARGET_X86_64
5772
    case CC_OP_MULQ: return compute_all_mulq();
5773

    
5774
    case CC_OP_ADDQ: return compute_all_addq();
5775

    
5776
    case CC_OP_ADCQ: return compute_all_adcq();
5777

    
5778
    case CC_OP_SUBQ: return compute_all_subq();
5779

    
5780
    case CC_OP_SBBQ: return compute_all_sbbq();
5781

    
5782
    case CC_OP_LOGICQ: return compute_all_logicq();
5783

    
5784
    case CC_OP_INCQ: return compute_all_incq();
5785

    
5786
    case CC_OP_DECQ: return compute_all_decq();
5787

    
5788
    case CC_OP_SHLQ: return compute_all_shlq();
5789

    
5790
    case CC_OP_SARQ: return compute_all_sarq();
5791
#endif
5792
    }
5793
}

uint32_t cpu_cc_compute_all(CPUState *env1, int op)
{
    CPUState *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}