
root / target-i386 / op_helper.c @ 38de4c46


1
/*
2
 *  i386 helpers
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19

    
20
#include "cpu.h"
21
#include "dyngen-exec.h"
22
#include "host-utils.h"
23
#include "ioport.h"
24
#include "qemu-log.h"
25
#include "cpu-defs.h"
26
#include "helper.h"
27

    
28
#if !defined(CONFIG_USER_ONLY)
29
#include "softmmu_exec.h"
30
#endif /* !defined(CONFIG_USER_ONLY) */
31

    
32
//#define DEBUG_PCALL
33
//#define DEBUG_MULDIV
34

    
35
#ifdef DEBUG_PCALL
36
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
37
# define LOG_PCALL_STATE(env)                                  \
38
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
39
#else
40
# define LOG_PCALL(...) do { } while (0)
41
# define LOG_PCALL_STATE(env) do { } while (0)
42
#endif
43

    
44
/* n must be a constant to be efficient */
45
static inline target_long lshift(target_long x, int n)
46
{
47
    if (n >= 0) {
48
        return x << n;
49
    } else {
50
        return x >> (-n);
51
    }
52
}
53

    
54
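/* return the full EFLAGS value: the bits stored in env->eflags merged
   with the lazily evaluated condition codes (CC_OP/CC_SRC) and DF */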
static inline uint32_t compute_eflags(void)
55
{
56
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
57
}
58

    
59
/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
60
static inline void load_eflags(int eflags, int update_mask)
61
{
62
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
63
    DF = 1 - (2 * ((eflags >> 10) & 1));
64
    env->eflags = (env->eflags & ~update_mask) |
65
        (eflags & update_mask) | 0x2;
66
}
67

    
68
/* load efer and update the corresponding hflags. XXX: do consistency
69
   checks with cpuid bits? */
70
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
71
{
72
    env->efer = val;
73
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
74
    if (env->efer & MSR_EFER_LMA) {
75
        env->hflags |= HF_LMA_MASK;
76
    }
77
    if (env->efer & MSR_EFER_SVME) {
78
        env->hflags |= HF_SVME_MASK;
79
    }
80
}
81

    
82
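/* PF value for each 8-bit result: CC_P is set when the low byte
   contains an even number of set bits */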
static const uint8_t parity_table[256] = {
83
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
84
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
85
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
86
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
87
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
88
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
89
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
90
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
91
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
92
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
93
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
94
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
95
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
96
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
97
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
98
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
99
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
100
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
101
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
102
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
103
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
104
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
105
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
106
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
107
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
108
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
109
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
110
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
111
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
112
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
113
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
114
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
115
};
116

    
117
/* modulo 17 table, used by the 16-bit RCL/RCR helpers (16 data bits plus CF) */
118
static const uint8_t rclw_table[32] = {
119
    0, 1, 2, 3, 4, 5, 6, 7,
120
    8, 9, 10, 11, 12, 13, 14, 15,
121
    16, 0, 1, 2, 3, 4, 5, 6,
122
    7, 8, 9, 10, 11, 12, 13, 14,
123
};
124

    
125
/* modulo 9 table, used by the 8-bit RCL/RCR helpers (8 data bits plus CF) */
126
static const uint8_t rclb_table[32] = {
127
    0, 1, 2, 3, 4, 5, 6, 7,
128
    8, 0, 1, 2, 3, 4, 5, 6,
129
    7, 8, 0, 1, 2, 3, 4, 5,
130
    6, 7, 8, 0, 1, 2, 3, 4,
131
};
132

    
133
/* broken thread support: locked instructions are serialised with a
   single global spinlock */
134

    
135
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
136

    
137
void helper_lock(void)
138
{
139
    spin_lock(&global_cpu_lock);
140
}
141

    
142
void helper_unlock(void)
143
{
144
    spin_unlock(&global_cpu_lock);
145
}
146

    
147
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
148
{
149
    load_eflags(t0, update_mask);
150
}
151

    
152
target_ulong helper_read_eflags(void)
153
{
154
    uint32_t eflags;
155

    
156
    eflags = helper_cc_compute_all(CC_OP);
157
    eflags |= (DF & DF_MASK);
158
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
159
    return eflags;
160
}
161

    
162
/* load the two descriptor words for 'selector' from the GDT or LDT;
   return non-zero on error */
163
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
164
                               int selector)
165
{
166
    SegmentCache *dt;
167
    int index;
168
    target_ulong ptr;
169

    
170
    if (selector & 0x4) {
171
        dt = &env->ldt;
172
    } else {
173
        dt = &env->gdt;
174
    }
175
    index = selector & ~7;
176
    if ((index + 7) > dt->limit) {
177
        return -1;
178
    }
179
    ptr = dt->base + index;
180
    *e1_ptr = ldl_kernel(ptr);
181
    *e2_ptr = ldl_kernel(ptr + 4);
182
    return 0;
183
}
184

    
185
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
186
{
187
    unsigned int limit;
188

    
189
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
190
    if (e2 & DESC_G_MASK) {
191
        limit = (limit << 12) | 0xfff;
192
    }
193
    return limit;
194
}
195

    
196
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
197
{
198
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
199
}
200

    
201
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
202
                                         uint32_t e2)
203
{
204
    sc->base = get_seg_base(e1, e2);
205
    sc->limit = get_seg_limit(e1, e2);
206
    sc->flags = e2;
207
}
208

    
209
/* init the segment cache in vm86 mode. */
210
static inline void load_seg_vm(int seg, int selector)
211
{
212
    selector &= 0xffff;
213
    cpu_x86_load_seg_cache(env, seg, selector,
214
                           (selector << 4), 0xffff, 0);
215
}
216

    
217
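/* fetch the inner-level stack pointer and stack segment selector for
   privilege level 'dpl' from the current TSS (16- or 32-bit format) */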
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
218
                                       uint32_t *esp_ptr, int dpl)
219
{
220
    int type, index, shift;
221

    
222
#if 0
223
    {
224
        int i;
225
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
226
        for (i = 0; i < env->tr.limit; i++) {
227
            printf("%02x ", env->tr.base[i]);
228
            if ((i & 7) == 7) {
229
                printf("\n");
230
            }
231
        }
232
        printf("\n");
233
    }
234
#endif
235

    
236
    if (!(env->tr.flags & DESC_P_MASK)) {
237
        cpu_abort(env, "invalid tss");
238
    }
239
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
240
    if ((type & 7) != 1) {
241
        cpu_abort(env, "invalid tss type");
242
    }
243
    shift = type >> 3;
244
    index = (dpl * 4 + 2) << shift;
245
    if (index + (4 << shift) - 1 > env->tr.limit) {
246
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
247
    }
248
    if (shift == 0) {
249
        *esp_ptr = lduw_kernel(env->tr.base + index);
250
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
251
    } else {
252
        *esp_ptr = ldl_kernel(env->tr.base + index);
253
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
254
    }
255
}
256

    
257
/* XXX: merge with load_seg() */
258
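/* validate and load a segment register as part of a task switch:
   invalid selectors raise #TS, a not-present segment raises #NP */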
static void tss_load_seg(int seg_reg, int selector)
259
{
260
    uint32_t e1, e2;
261
    int rpl, dpl, cpl;
262

    
263
    if ((selector & 0xfffc) != 0) {
264
        if (load_segment(&e1, &e2, selector) != 0) {
265
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
266
        }
267
        if (!(e2 & DESC_S_MASK)) {
268
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
269
        }
270
        rpl = selector & 3;
271
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
272
        cpl = env->hflags & HF_CPL_MASK;
273
        if (seg_reg == R_CS) {
274
            if (!(e2 & DESC_CS_MASK)) {
275
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
276
            }
277
            /* XXX: is it correct? */
278
            if (dpl != rpl) {
279
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
280
            }
281
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
282
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
283
            }
284
        } else if (seg_reg == R_SS) {
285
            /* SS must be writable data */
286
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
287
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
288
            }
289
            if (dpl != cpl || dpl != rpl) {
290
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
291
            }
292
        } else {
293
            /* a non-readable code segment is not allowed */
294
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
295
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
296
            }
297
            /* for data or non-conforming code, check the privilege rights */
298
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
299
                if (dpl < cpl || dpl < rpl) {
300
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
301
                }
302
            }
303
        }
304
        if (!(e2 & DESC_P_MASK)) {
305
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
306
        }
307
        cpu_x86_load_seg_cache(env, seg_reg, selector,
308
                               get_seg_base(e1, e2),
309
                               get_seg_limit(e1, e2),
310
                               e2);
311
    } else {
312
        if (seg_reg == R_SS || seg_reg == R_CS) {
313
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
314
        }
315
    }
316
}
317

    
318
#define SWITCH_TSS_JMP  0
319
#define SWITCH_TSS_IRET 1
320
#define SWITCH_TSS_CALL 2
321

    
322
/* XXX: restore CPU state in registers (PowerPC case) */
323
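/* perform a hardware task switch: validate the new TSS, save the
   current CPU state into the old TSS, then load registers, segments
   and the LDT from the new one.  'source' is SWITCH_TSS_JMP, _IRET or
   _CALL and controls how the busy bit and the NT flag are handled. */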
static void switch_tss(int tss_selector,
324
                       uint32_t e1, uint32_t e2, int source,
325
                       uint32_t next_eip)
326
{
327
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
328
    target_ulong tss_base;
329
    uint32_t new_regs[8], new_segs[6];
330
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
331
    uint32_t old_eflags, eflags_mask;
332
    SegmentCache *dt;
333
    int index;
334
    target_ulong ptr;
335

    
336
    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
337
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
338
              source);
339

    
340
    /* if this is a task gate, read the TSS segment it references and load it */
341
    if (type == 5) {
342
        if (!(e2 & DESC_P_MASK)) {
343
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
344
        }
345
        tss_selector = e1 >> 16;
346
        if (tss_selector & 4) {
347
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
348
        }
349
        if (load_segment(&e1, &e2, tss_selector) != 0) {
350
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
351
        }
352
        if (e2 & DESC_S_MASK) {
353
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
354
        }
355
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
356
        if ((type & 7) != 1) {
357
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
358
        }
359
    }
360

    
361
    if (!(e2 & DESC_P_MASK)) {
362
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
363
    }
364

    
365
    if (type & 8) {
366
        tss_limit_max = 103;
367
    } else {
368
        tss_limit_max = 43;
369
    }
370
    tss_limit = get_seg_limit(e1, e2);
371
    tss_base = get_seg_base(e1, e2);
372
    if ((tss_selector & 4) != 0 ||
373
        tss_limit < tss_limit_max) {
374
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
375
    }
376
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
377
    if (old_type & 8) {
378
        old_tss_limit_max = 103;
379
    } else {
380
        old_tss_limit_max = 43;
381
    }
382

    
383
    /* read all the registers from the new TSS */
384
    if (type & 8) {
385
        /* 32 bit */
386
        new_cr3 = ldl_kernel(tss_base + 0x1c);
387
        new_eip = ldl_kernel(tss_base + 0x20);
388
        new_eflags = ldl_kernel(tss_base + 0x24);
389
        for (i = 0; i < 8; i++) {
390
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
391
        }
392
        for (i = 0; i < 6; i++) {
393
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
394
        }
395
        new_ldt = lduw_kernel(tss_base + 0x60);
396
        new_trap = ldl_kernel(tss_base + 0x64);
397
    } else {
398
        /* 16 bit */
399
        new_cr3 = 0;
400
        new_eip = lduw_kernel(tss_base + 0x0e);
401
        new_eflags = lduw_kernel(tss_base + 0x10);
402
        for (i = 0; i < 8; i++) {
403
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
404
        }
405
        for (i = 0; i < 4; i++) {
406
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
407
        }
408
        new_ldt = lduw_kernel(tss_base + 0x2a);
409
        new_segs[R_FS] = 0;
410
        new_segs[R_GS] = 0;
411
        new_trap = 0;
412
    }
413
    /* XXX: avoid a compiler warning, see
414
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
415
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
416
    (void)new_trap;
417

    
418
    /* NOTE: we must avoid memory exceptions during the task switch,
419
       so we make dummy accesses before */
420
    /* XXX: it can still fail in some cases, so a bigger hack is
421
       necessary to validate the TLB after the accesses have been done */
422

    
423
    v1 = ldub_kernel(env->tr.base);
424
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
425
    stb_kernel(env->tr.base, v1);
426
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
427

    
428
    /* clear the busy bit of the old TSS (the old task remains restartable) */
429
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
430
        target_ulong ptr;
431
        uint32_t e2;
432

    
433
        ptr = env->gdt.base + (env->tr.selector & ~7);
434
        e2 = ldl_kernel(ptr + 4);
435
        e2 &= ~DESC_TSS_BUSY_MASK;
436
        stl_kernel(ptr + 4, e2);
437
    }
438
    old_eflags = compute_eflags();
439
    if (source == SWITCH_TSS_IRET) {
440
        old_eflags &= ~NT_MASK;
441
    }
442

    
443
    /* save the current state in the old TSS */
444
    if (type & 8) {
445
        /* 32 bit */
446
        stl_kernel(env->tr.base + 0x20, next_eip);
447
        stl_kernel(env->tr.base + 0x24, old_eflags);
448
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
449
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
450
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
451
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
452
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
453
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
454
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
455
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
456
        for (i = 0; i < 6; i++) {
457
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
458
        }
459
    } else {
460
        /* 16 bit */
461
        stw_kernel(env->tr.base + 0x0e, next_eip);
462
        stw_kernel(env->tr.base + 0x10, old_eflags);
463
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
464
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
465
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
466
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
467
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
468
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
469
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
470
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
471
        for (i = 0; i < 4; i++) {
472
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
473
        }
474
    }
475

    
476
    /* from now on, if an exception occurs, it will occur in the next task
477
       context */
478

    
479
    if (source == SWITCH_TSS_CALL) {
480
        stw_kernel(tss_base, env->tr.selector);
481
        new_eflags |= NT_MASK;
482
    }
483

    
484
    /* set busy bit */
485
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
486
        target_ulong ptr;
487
        uint32_t e2;
488

    
489
        ptr = env->gdt.base + (tss_selector & ~7);
490
        e2 = ldl_kernel(ptr + 4);
491
        e2 |= DESC_TSS_BUSY_MASK;
492
        stl_kernel(ptr + 4, e2);
493
    }
494

    
495
    /* set the new CPU state */
496
    /* from this point on, any exception that occurs can cause problems */
497
    env->cr[0] |= CR0_TS_MASK;
498
    env->hflags |= HF_TS_MASK;
499
    env->tr.selector = tss_selector;
500
    env->tr.base = tss_base;
501
    env->tr.limit = tss_limit;
502
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
503

    
504
    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
505
        cpu_x86_update_cr3(env, new_cr3);
506
    }
507

    
508
    /* load all registers without an exception, then reload them with
509
       possible exception */
510
    env->eip = new_eip;
511
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
512
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
513
    if (!(type & 8)) {
514
        eflags_mask &= 0xffff;
515
    }
516
    load_eflags(new_eflags, eflags_mask);
517
    /* XXX: what to do in 16 bit case? */
518
    EAX = new_regs[0];
519
    ECX = new_regs[1];
520
    EDX = new_regs[2];
521
    EBX = new_regs[3];
522
    ESP = new_regs[4];
523
    EBP = new_regs[5];
524
    ESI = new_regs[6];
525
    EDI = new_regs[7];
526
    if (new_eflags & VM_MASK) {
527
        for (i = 0; i < 6; i++) {
528
            load_seg_vm(i, new_segs[i]);
529
        }
530
        /* in vm86, CPL is always 3 */
531
        cpu_x86_set_cpl(env, 3);
532
    } else {
533
        /* CPL is set to the RPL of CS */
534
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
535
        /* load only the selectors first, as loading the rest may trigger exceptions */
536
        for (i = 0; i < 6; i++) {
537
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
538
        }
539
    }
540

    
541
    env->ldt.selector = new_ldt & ~4;
542
    env->ldt.base = 0;
543
    env->ldt.limit = 0;
544
    env->ldt.flags = 0;
545

    
546
    /* load the LDT */
547
    if (new_ldt & 4) {
548
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
549
    }
550

    
551
    if ((new_ldt & 0xfffc) != 0) {
552
        dt = &env->gdt;
553
        index = new_ldt & ~7;
554
        if ((index + 7) > dt->limit) {
555
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
556
        }
557
        ptr = dt->base + index;
558
        e1 = ldl_kernel(ptr);
559
        e2 = ldl_kernel(ptr + 4);
560
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
561
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
562
        }
563
        if (!(e2 & DESC_P_MASK)) {
564
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
565
        }
566
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
567
    }
568

    
569
    /* load the segments */
570
    if (!(new_eflags & VM_MASK)) {
571
        tss_load_seg(R_CS, new_segs[R_CS]);
572
        tss_load_seg(R_SS, new_segs[R_SS]);
573
        tss_load_seg(R_ES, new_segs[R_ES]);
574
        tss_load_seg(R_DS, new_segs[R_DS]);
575
        tss_load_seg(R_FS, new_segs[R_FS]);
576
        tss_load_seg(R_GS, new_segs[R_GS]);
577
    }
578

    
579
    /* check that EIP is in the CS segment limits */
580
    if (new_eip > env->segs[R_CS].limit) {
581
        /* XXX: different exception if CALL? */
582
        raise_exception_err(env, EXCP0D_GPF, 0);
583
    }
584

    
585
#ifndef CONFIG_USER_ONLY
586
    /* reset local breakpoints */
587
    if (env->dr[7] & 0x55) {
588
        for (i = 0; i < 4; i++) {
589
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
590
                hw_breakpoint_remove(env, i);
591
            }
592
        }
593
        env->dr[7] &= ~0x55;
594
    }
595
#endif
596
}
597

    
598
/* check whether port I/O is allowed by the TSS I/O permission bitmap */
599
static inline void check_io(int addr, int size)
600
{
601
    int io_offset, val, mask;
602

    
603
    /* TSS must be a valid 32 bit one */
604
    if (!(env->tr.flags & DESC_P_MASK) ||
605
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
606
        env->tr.limit < 103) {
607
        goto fail;
608
    }
609
    io_offset = lduw_kernel(env->tr.base + 0x66);
610
    io_offset += (addr >> 3);
611
    /* Note: the check needs two bytes */
612
    if ((io_offset + 1) > env->tr.limit) {
613
        goto fail;
614
    }
615
    val = lduw_kernel(env->tr.base + io_offset);
616
    val >>= (addr & 7);
617
    mask = (1 << size) - 1;
618
    /* all bits must be zero to allow the I/O */
619
    if ((val & mask) != 0) {
620
    fail:
621
        raise_exception_err(env, EXCP0D_GPF, 0);
622
    }
623
}
624

    
625
void helper_check_iob(uint32_t t0)
626
{
627
    check_io(t0, 1);
628
}
629

    
630
void helper_check_iow(uint32_t t0)
631
{
632
    check_io(t0, 2);
633
}
634

    
635
void helper_check_iol(uint32_t t0)
636
{
637
    check_io(t0, 4);
638
}
639

    
640
void helper_outb(uint32_t port, uint32_t data)
641
{
642
    cpu_outb(port, data & 0xff);
643
}
644

    
645
target_ulong helper_inb(uint32_t port)
646
{
647
    return cpu_inb(port);
648
}
649

    
650
void helper_outw(uint32_t port, uint32_t data)
651
{
652
    cpu_outw(port, data & 0xffff);
653
}
654

    
655
target_ulong helper_inw(uint32_t port)
656
{
657
    return cpu_inw(port);
658
}
659

    
660
void helper_outl(uint32_t port, uint32_t data)
661
{
662
    cpu_outl(port, data);
663
}
664

    
665
target_ulong helper_inl(uint32_t port)
666
{
667
    return cpu_inl(port);
668
}
669

    
670
static inline unsigned int get_sp_mask(unsigned int e2)
671
{
672
    if (e2 & DESC_B_MASK) {
673
        return 0xffffffff;
674
    } else {
675
        return 0xffff;
676
    }
677
}
678

    
679
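/* exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC)
   push an error code on the stack */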
static int exception_has_error_code(int intno)
680
{
681
    switch (intno) {
682
    case 8:
683
    case 10:
684
    case 11:
685
    case 12:
686
    case 13:
687
    case 14:
688
    case 17:
689
        return 1;
690
    }
691
    return 0;
692
}
693

    
694
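/* update the stack pointer according to the stack size: a 16-bit stack
   only modifies SP, a 32-bit stack zero-extends into ESP and, on
   x86-64, any other mask writes the full 64-bit RSP */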
#ifdef TARGET_X86_64
695
#define SET_ESP(val, sp_mask)                           \
696
    do {                                                \
697
        if ((sp_mask) == 0xffff) {                      \
698
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);   \
699
        } else if ((sp_mask) == 0xffffffffLL) {         \
700
            ESP = (uint32_t)(val);                      \
701
        } else {                                        \
702
            ESP = (val);                                \
703
        }                                               \
704
    } while (0)
705
#else
706
#define SET_ESP(val, sp_mask)                           \
707
    do {                                                \
708
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
709
    } while (0)
710
#endif
711

    
712
/* on 64-bit machines this addition can overflow, so this segment addition macro
713
 * can be used to trim the value to 32 bits whenever needed */
714
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
715

    
716
/* XXX: add an is_user flag to have proper security support */
717
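/* stack push/pop helpers: 'ssp' is the stack segment base, 'sp' the
   stack offset (updated in place) and 'sp_mask' selects 16- or 32-bit
   wrap-around of the offset */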
#define PUSHW(ssp, sp, sp_mask, val)                    \
718
    {                                                   \
719
        sp -= 2;                                        \
720
        stw_kernel((ssp) + (sp & (sp_mask)), (val));    \
721
    }
722

    
723
#define PUSHL(ssp, sp, sp_mask, val)                                    \
724
    {                                                                   \
725
        sp -= 4;                                                        \
726
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));        \
727
    }
728

    
729
#define POPW(ssp, sp, sp_mask, val)                     \
730
    {                                                   \
731
        val = lduw_kernel((ssp) + (sp & (sp_mask)));    \
732
        sp += 2;                                        \
733
    }
734

    
735
#define POPL(ssp, sp, sp_mask, val)                             \
736
    {                                                           \
737
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
738
        sp += 4;                                                \
739
    }
740

    
741
/* protected mode interrupt */
742
static void do_interrupt_protected(int intno, int is_int, int error_code,
743
                                   unsigned int next_eip, int is_hw)
744
{
745
    SegmentCache *dt;
746
    target_ulong ptr, ssp;
747
    int type, dpl, selector, ss_dpl, cpl;
748
    int has_error_code, new_stack, shift;
749
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
750
    uint32_t old_eip, sp_mask;
751

    
752
    has_error_code = 0;
753
    if (!is_int && !is_hw) {
754
        has_error_code = exception_has_error_code(intno);
755
    }
756
    if (is_int) {
757
        old_eip = next_eip;
758
    } else {
759
        old_eip = env->eip;
760
    }
761

    
762
    dt = &env->idt;
763
    if (intno * 8 + 7 > dt->limit) {
764
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
765
    }
766
    ptr = dt->base + intno * 8;
767
    e1 = ldl_kernel(ptr);
768
    e2 = ldl_kernel(ptr + 4);
769
    /* check gate type */
770
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
771
    switch (type) {
772
    case 5: /* task gate */
773
        /* must do that check here to return the correct error code */
774
        if (!(e2 & DESC_P_MASK)) {
775
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
776
        }
777
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
778
        if (has_error_code) {
779
            int type;
780
            uint32_t mask;
781

    
782
            /* push the error code */
783
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
784
            shift = type >> 3;
785
            if (env->segs[R_SS].flags & DESC_B_MASK) {
786
                mask = 0xffffffff;
787
            } else {
788
                mask = 0xffff;
789
            }
790
            esp = (ESP - (2 << shift)) & mask;
791
            ssp = env->segs[R_SS].base + esp;
792
            if (shift) {
793
                stl_kernel(ssp, error_code);
794
            } else {
795
                stw_kernel(ssp, error_code);
796
            }
797
            SET_ESP(esp, mask);
798
        }
799
        return;
800
    case 6: /* 286 interrupt gate */
801
    case 7: /* 286 trap gate */
802
    case 14: /* 386 interrupt gate */
803
    case 15: /* 386 trap gate */
804
        break;
805
    default:
806
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
807
        break;
808
    }
809
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
810
    cpl = env->hflags & HF_CPL_MASK;
811
    /* check privilege if software int */
812
    if (is_int && dpl < cpl) {
813
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
814
    }
815
    /* check valid bit */
816
    if (!(e2 & DESC_P_MASK)) {
817
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
818
    }
819
    selector = e1 >> 16;
820
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
821
    if ((selector & 0xfffc) == 0) {
822
        raise_exception_err(env, EXCP0D_GPF, 0);
823
    }
824
    if (load_segment(&e1, &e2, selector) != 0) {
825
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
826
    }
827
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
828
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
829
    }
830
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
831
    if (dpl > cpl) {
832
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
833
    }
834
    if (!(e2 & DESC_P_MASK)) {
835
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
836
    }
837
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
838
        /* to inner privilege */
839
        get_ss_esp_from_tss(&ss, &esp, dpl);
840
        if ((ss & 0xfffc) == 0) {
841
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
842
        }
843
        if ((ss & 3) != dpl) {
844
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
845
        }
846
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
847
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
848
        }
849
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
850
        if (ss_dpl != dpl) {
851
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
852
        }
853
        if (!(ss_e2 & DESC_S_MASK) ||
854
            (ss_e2 & DESC_CS_MASK) ||
855
            !(ss_e2 & DESC_W_MASK)) {
856
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
857
        }
858
        if (!(ss_e2 & DESC_P_MASK)) {
859
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
860
        }
861
        new_stack = 1;
862
        sp_mask = get_sp_mask(ss_e2);
863
        ssp = get_seg_base(ss_e1, ss_e2);
864
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
865
        /* to same privilege */
866
        if (env->eflags & VM_MASK) {
867
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
868
        }
869
        new_stack = 0;
870
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
871
        ssp = env->segs[R_SS].base;
872
        esp = ESP;
873
        dpl = cpl;
874
    } else {
875
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
876
        new_stack = 0; /* avoid warning */
877
        sp_mask = 0; /* avoid warning */
878
        ssp = 0; /* avoid warning */
879
        esp = 0; /* avoid warning */
880
    }
881

    
882
    shift = type >> 3;
883

    
884
#if 0
885
    /* XXX: check that enough room is available */
886
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
887
    if (env->eflags & VM_MASK) {
888
        push_size += 8;
889
    }
890
    push_size <<= shift;
891
#endif
892
    if (shift == 1) {
893
        if (new_stack) {
894
            if (env->eflags & VM_MASK) {
895
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
896
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
897
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
898
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
899
            }
900
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
901
            PUSHL(ssp, esp, sp_mask, ESP);
902
        }
903
        PUSHL(ssp, esp, sp_mask, compute_eflags());
904
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
905
        PUSHL(ssp, esp, sp_mask, old_eip);
906
        if (has_error_code) {
907
            PUSHL(ssp, esp, sp_mask, error_code);
908
        }
909
    } else {
910
        if (new_stack) {
911
            if (env->eflags & VM_MASK) {
912
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
913
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
914
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
915
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
916
            }
917
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
918
            PUSHW(ssp, esp, sp_mask, ESP);
919
        }
920
        PUSHW(ssp, esp, sp_mask, compute_eflags());
921
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
922
        PUSHW(ssp, esp, sp_mask, old_eip);
923
        if (has_error_code) {
924
            PUSHW(ssp, esp, sp_mask, error_code);
925
        }
926
    }
927

    
928
    if (new_stack) {
929
        if (env->eflags & VM_MASK) {
930
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
931
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
932
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
933
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
934
        }
935
        ss = (ss & ~3) | dpl;
936
        cpu_x86_load_seg_cache(env, R_SS, ss,
937
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
938
    }
939
    SET_ESP(esp, sp_mask);
940

    
941
    selector = (selector & ~3) | dpl;
942
    cpu_x86_load_seg_cache(env, R_CS, selector,
943
                   get_seg_base(e1, e2),
944
                   get_seg_limit(e1, e2),
945
                   e2);
946
    cpu_x86_set_cpl(env, dpl);
947
    env->eip = offset;
948

    
949
    /* interrupt gates clear the IF flag */
950
    if ((type & 1) == 0) {
951
        env->eflags &= ~IF_MASK;
952
    }
953
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
954
}
955

    
956
#ifdef TARGET_X86_64
957

    
958
#define PUSHQ(sp, val)                          \
959
    {                                           \
960
        sp -= 8;                                \
961
        stq_kernel(sp, (val));                  \
962
    }
963

    
964
#define POPQ(sp, val)                           \
965
    {                                           \
966
        val = ldq_kernel(sp);                   \
967
        sp += 8;                                \
968
    }
969

    
970
static inline target_ulong get_rsp_from_tss(int level)
971
{
972
    int index;
973

    
974
#if 0
975
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
976
           env->tr.base, env->tr.limit);
977
#endif
978

    
979
    if (!(env->tr.flags & DESC_P_MASK)) {
980
        cpu_abort(env, "invalid tss");
981
    }
982
    index = 8 * level + 4;
983
    if ((index + 7) > env->tr.limit) {
984
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
985
    }
986
    return ldq_kernel(env->tr.base + index);
987
}
988

    
989
/* 64 bit interrupt */
990
static void do_interrupt64(int intno, int is_int, int error_code,
991
                           target_ulong next_eip, int is_hw)
992
{
993
    SegmentCache *dt;
994
    target_ulong ptr;
995
    int type, dpl, selector, cpl, ist;
996
    int has_error_code, new_stack;
997
    uint32_t e1, e2, e3, ss;
998
    target_ulong old_eip, esp, offset;
999

    
1000
    has_error_code = 0;
1001
    if (!is_int && !is_hw) {
1002
        has_error_code = exception_has_error_code(intno);
1003
    }
1004
    if (is_int) {
1005
        old_eip = next_eip;
1006
    } else {
1007
        old_eip = env->eip;
1008
    }
1009

    
1010
    dt = &env->idt;
1011
    if (intno * 16 + 15 > dt->limit) {
1012
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1013
    }
1014
    ptr = dt->base + intno * 16;
1015
    e1 = ldl_kernel(ptr);
1016
    e2 = ldl_kernel(ptr + 4);
1017
    e3 = ldl_kernel(ptr + 8);
1018
    /* check gate type */
1019
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1020
    switch (type) {
1021
    case 14: /* 386 interrupt gate */
1022
    case 15: /* 386 trap gate */
1023
        break;
1024
    default:
1025
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1026
        break;
1027
    }
1028
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1029
    cpl = env->hflags & HF_CPL_MASK;
1030
    /* check privilege if software int */
1031
    if (is_int && dpl < cpl) {
1032
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
1033
    }
1034
    /* check valid bit */
1035
    if (!(e2 & DESC_P_MASK)) {
1036
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
1037
    }
1038
    selector = e1 >> 16;
1039
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1040
    ist = e2 & 7;
1041
    if ((selector & 0xfffc) == 0) {
1042
        raise_exception_err(env, EXCP0D_GPF, 0);
1043
    }
1044

    
1045
    if (load_segment(&e1, &e2, selector) != 0) {
1046
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1047
    }
1048
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1049
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1050
    }
1051
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1052
    if (dpl > cpl) {
1053
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1054
    }
1055
    if (!(e2 & DESC_P_MASK)) {
1056
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1057
    }
1058
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
1059
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1060
    }
1061
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1062
        /* to inner privilege */
1063
        if (ist != 0) {
1064
            esp = get_rsp_from_tss(ist + 3);
1065
        } else {
1066
            esp = get_rsp_from_tss(dpl);
1067
        }
1068
        esp &= ~0xfLL; /* align stack */
1069
        ss = 0;
1070
        new_stack = 1;
1071
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1072
        /* to same privilege */
1073
        if (env->eflags & VM_MASK) {
1074
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1075
        }
1076
        new_stack = 0;
1077
        if (ist != 0) {
1078
            esp = get_rsp_from_tss(ist + 3);
1079
        } else {
1080
            esp = ESP;
1081
        }
1082
        esp &= ~0xfLL; /* align stack */
1083
        dpl = cpl;
1084
    } else {
1085
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1086
        new_stack = 0; /* avoid warning */
1087
        esp = 0; /* avoid warning */
1088
    }
1089

    
1090
    PUSHQ(esp, env->segs[R_SS].selector);
1091
    PUSHQ(esp, ESP);
1092
    PUSHQ(esp, compute_eflags());
1093
    PUSHQ(esp, env->segs[R_CS].selector);
1094
    PUSHQ(esp, old_eip);
1095
    if (has_error_code) {
1096
        PUSHQ(esp, error_code);
1097
    }
1098

    
1099
    if (new_stack) {
1100
        ss = 0 | dpl;
1101
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1102
    }
1103
    ESP = esp;
1104

    
1105
    selector = (selector & ~3) | dpl;
1106
    cpu_x86_load_seg_cache(env, R_CS, selector,
1107
                   get_seg_base(e1, e2),
1108
                   get_seg_limit(e1, e2),
1109
                   e2);
1110
    cpu_x86_set_cpl(env, dpl);
1111
    env->eip = offset;
1112

    
1113
    /* interrupt gates clear the IF flag */
1114
    if ((type & 1) == 0) {
1115
        env->eflags &= ~IF_MASK;
1116
    }
1117
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1118
}
1119
#endif
1120

    
1121
#ifdef TARGET_X86_64
1122
#if defined(CONFIG_USER_ONLY)
1123
void helper_syscall(int next_eip_addend)
1124
{
1125
    env->exception_index = EXCP_SYSCALL;
1126
    env->exception_next_eip = env->eip + next_eip_addend;
1127
    cpu_loop_exit(env);
1128
}
1129
#else
1130
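/* SYSCALL: CS/SS are loaded from MSR_STAR[47:32]; in long mode the
   return RIP is saved in RCX and RFLAGS in R11, RFLAGS is masked with
   MSR_FMASK and execution continues at MSR_LSTAR (or MSR_CSTAR for
   compatibility-mode code); in legacy mode EIP is loaded from the low
   32 bits of MSR_STAR */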
void helper_syscall(int next_eip_addend)
1131
{
1132
    int selector;
1133

    
1134
    if (!(env->efer & MSR_EFER_SCE)) {
1135
        raise_exception_err(env, EXCP06_ILLOP, 0);
1136
    }
1137
    selector = (env->star >> 32) & 0xffff;
1138
    if (env->hflags & HF_LMA_MASK) {
1139
        int code64;
1140

    
1141
        ECX = env->eip + next_eip_addend;
1142
        env->regs[11] = compute_eflags();
1143

    
1144
        code64 = env->hflags & HF_CS64_MASK;
1145

    
1146
        cpu_x86_set_cpl(env, 0);
1147
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1148
                           0, 0xffffffff,
1149
                               DESC_G_MASK | DESC_P_MASK |
1150
                               DESC_S_MASK |
1151
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1152
                               DESC_L_MASK);
1153
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1154
                               0, 0xffffffff,
1155
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1156
                               DESC_S_MASK |
1157
                               DESC_W_MASK | DESC_A_MASK);
1158
        env->eflags &= ~env->fmask;
1159
        load_eflags(env->eflags, 0);
1160
        if (code64) {
1161
            env->eip = env->lstar;
1162
        } else {
1163
            env->eip = env->cstar;
1164
        }
1165
    } else {
1166
        ECX = (uint32_t)(env->eip + next_eip_addend);
1167

    
1168
        cpu_x86_set_cpl(env, 0);
1169
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1170
                           0, 0xffffffff,
1171
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1172
                               DESC_S_MASK |
1173
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1174
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1175
                               0, 0xffffffff,
1176
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1177
                               DESC_S_MASK |
1178
                               DESC_W_MASK | DESC_A_MASK);
1179
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1180
        env->eip = (uint32_t)env->star;
1181
    }
1182
}
1183
#endif
1184
#endif
1185

    
1186
#ifdef TARGET_X86_64
1187
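/* SYSRET: return to CPL 3, loading CS/SS from MSR_STAR[63:48]; in long
   mode RFLAGS is restored from R11 and RIP from RCX (ECX for a 32-bit
   return), in legacy mode only IF is set */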
void helper_sysret(int dflag)
1188
{
1189
    int cpl, selector;
1190

    
1191
    if (!(env->efer & MSR_EFER_SCE)) {
1192
        raise_exception_err(env, EXCP06_ILLOP, 0);
1193
    }
1194
    cpl = env->hflags & HF_CPL_MASK;
1195
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1196
        raise_exception_err(env, EXCP0D_GPF, 0);
1197
    }
1198
    selector = (env->star >> 48) & 0xffff;
1199
    if (env->hflags & HF_LMA_MASK) {
1200
        if (dflag == 2) {
1201
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1202
                                   0, 0xffffffff,
1203
                                   DESC_G_MASK | DESC_P_MASK |
1204
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1205
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1206
                                   DESC_L_MASK);
1207
            env->eip = ECX;
1208
        } else {
1209
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1210
                                   0, 0xffffffff,
1211
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1212
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1213
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1214
            env->eip = (uint32_t)ECX;
1215
        }
1216
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1217
                               0, 0xffffffff,
1218
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1219
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1220
                               DESC_W_MASK | DESC_A_MASK);
1221
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1222
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1223
        cpu_x86_set_cpl(env, 3);
1224
    } else {
1225
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1226
                               0, 0xffffffff,
1227
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1228
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1229
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1230
        env->eip = (uint32_t)ECX;
1231
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1232
                               0, 0xffffffff,
1233
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1234
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1235
                               DESC_W_MASK | DESC_A_MASK);
1236
        env->eflags |= IF_MASK;
1237
        cpu_x86_set_cpl(env, 3);
1238
    }
1239
}
1240
#endif
1241

    
1242
/* real mode interrupt */
1243
static void do_interrupt_real(int intno, int is_int, int error_code,
1244
                              unsigned int next_eip)
1245
{
1246
    SegmentCache *dt;
1247
    target_ulong ptr, ssp;
1248
    int selector;
1249
    uint32_t offset, esp;
1250
    uint32_t old_cs, old_eip;
1251

    
1252
    /* real mode (simpler!) */
1253
    dt = &env->idt;
1254
    if (intno * 4 + 3 > dt->limit) {
1255
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1256
    }
1257
    ptr = dt->base + intno * 4;
1258
    offset = lduw_kernel(ptr);
1259
    selector = lduw_kernel(ptr + 2);
1260
    esp = ESP;
1261
    ssp = env->segs[R_SS].base;
1262
    if (is_int) {
1263
        old_eip = next_eip;
1264
    } else {
1265
        old_eip = env->eip;
1266
    }
1267
    old_cs = env->segs[R_CS].selector;
1268
    /* XXX: use SS segment size? */
1269
    PUSHW(ssp, esp, 0xffff, compute_eflags());
1270
    PUSHW(ssp, esp, 0xffff, old_cs);
1271
    PUSHW(ssp, esp, 0xffff, old_eip);
1272

    
1273
    /* update processor state */
1274
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
1275
    env->eip = offset;
1276
    env->segs[R_CS].selector = selector;
1277
    env->segs[R_CS].base = (selector << 4);
1278
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1279
}
1280

    
1281
#if defined(CONFIG_USER_ONLY)
1282
/* fake user mode interrupt */
1283
static void do_interrupt_user(int intno, int is_int, int error_code,
1284
                              target_ulong next_eip)
1285
{
1286
    SegmentCache *dt;
1287
    target_ulong ptr;
1288
    int dpl, cpl, shift;
1289
    uint32_t e2;
1290

    
1291
    dt = &env->idt;
1292
    if (env->hflags & HF_LMA_MASK) {
1293
        shift = 4;
1294
    } else {
1295
        shift = 3;
1296
    }
1297
    ptr = dt->base + (intno << shift);
1298
    e2 = ldl_kernel(ptr + 4);
1299

    
1300
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1301
    cpl = env->hflags & HF_CPL_MASK;
1302
    /* check privilege if software int */
1303
    if (is_int && dpl < cpl) {
1304
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1305
    }
1306

    
1307
    /* Since we emulate only user space, we cannot do more than
1308
       exiting the emulation with the suitable exception and error
1309
       code */
1310
    if (is_int) {
1311
        EIP = next_eip;
1312
    }
1313
}
1314

    
1315
#else
1316

    
1317
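/* while running with SVM interception enabled (HF_SVMI_MASK), record
   the event being delivered in the VMCB event_inj field, unless an
   injected event is already marked valid */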
static void handle_even_inj(int intno, int is_int, int error_code,
1318
                            int is_hw, int rm)
1319
{
1320
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
1321
                                                          control.event_inj));
1322

    
1323
    if (!(event_inj & SVM_EVTINJ_VALID)) {
1324
        int type;
1325

    
1326
        if (is_int) {
1327
            type = SVM_EVTINJ_TYPE_SOFT;
1328
        } else {
1329
            type = SVM_EVTINJ_TYPE_EXEPT;
1330
        }
1331
        event_inj = intno | type | SVM_EVTINJ_VALID;
1332
        if (!rm && exception_has_error_code(intno)) {
1333
            event_inj |= SVM_EVTINJ_VALID_ERR;
1334
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
1335
                                             control.event_inj_err),
1336
                     error_code);
1337
        }
1338
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1339
                 event_inj);
1340
    }
1341
}
1342
#endif
1343

    
1344
/*
1345
 * Begin execution of an interrupt. is_int is TRUE if coming from
1346
 * the int instruction. next_eip is the EIP value AFTER the interrupt
1347
 * instruction. It is only relevant if is_int is TRUE.
1348
 */
1349
static void do_interrupt_all(int intno, int is_int, int error_code,
1350
                             target_ulong next_eip, int is_hw)
1351
{
1352
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
1353
        if ((env->cr[0] & CR0_PE_MASK)) {
1354
            static int count;
1355

    
1356
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1357
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1358
                     count, intno, error_code, is_int,
1359
                     env->hflags & HF_CPL_MASK,
1360
                     env->segs[R_CS].selector, EIP,
1361
                     (int)env->segs[R_CS].base + EIP,
1362
                     env->segs[R_SS].selector, ESP);
1363
            if (intno == 0x0e) {
1364
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1365
            } else {
1366
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1367
            }
1368
            qemu_log("\n");
1369
            log_cpu_state(env, X86_DUMP_CCOP);
1370
#if 0
1371
            {
1372
                int i;
1373
                target_ulong ptr;
1374

1375
                qemu_log("       code=");
1376
                ptr = env->segs[R_CS].base + env->eip;
1377
                for (i = 0; i < 16; i++) {
1378
                    qemu_log(" %02x", ldub(ptr + i));
1379
                }
1380
                qemu_log("\n");
1381
            }
1382
#endif
1383
            count++;
1384
        }
1385
    }
1386
    if (env->cr[0] & CR0_PE_MASK) {
1387
#if !defined(CONFIG_USER_ONLY)
1388
        if (env->hflags & HF_SVMI_MASK) {
1389
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
1390
        }
1391
#endif
1392
#ifdef TARGET_X86_64
1393
        if (env->hflags & HF_LMA_MASK) {
1394
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1395
        } else
1396
#endif
1397
        {
1398
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1399
        }
1400
    } else {
1401
#if !defined(CONFIG_USER_ONLY)
1402
        if (env->hflags & HF_SVMI_MASK) {
1403
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
1404
        }
1405
#endif
1406
        do_interrupt_real(intno, is_int, error_code, next_eip);
1407
    }
1408

    
1409
#if !defined(CONFIG_USER_ONLY)
1410
    if (env->hflags & HF_SVMI_MASK) {
1411
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
1412
                                      offsetof(struct vmcb,
1413
                                               control.event_inj));
1414

    
1415
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1416
                 event_inj & ~SVM_EVTINJ_VALID);
1417
    }
1418
#endif
1419
}
1420

    
1421
void do_interrupt(CPUX86State *env1)
1422
{
1423
    CPUX86State *saved_env;
1424

    
1425
    saved_env = env;
1426
    env = env1;
1427
#if defined(CONFIG_USER_ONLY)
1428
    /* in user-mode-only emulation, we simulate a fake exception
1429
       which will be handled outside the cpu execution
1430
       loop */
1431
    do_interrupt_user(env->exception_index,
1432
                      env->exception_is_int,
1433
                      env->error_code,
1434
                      env->exception_next_eip);
1435
    /* successfully delivered */
1436
    env->old_exception = -1;
1437
#else
1438
    /* simulate a real cpu exception. On i386, it can
1439
       trigger new exceptions, but we do not handle
1440
       double or triple faults yet. */
1441
    do_interrupt_all(env->exception_index,
1442
                     env->exception_is_int,
1443
                     env->error_code,
1444
                     env->exception_next_eip, 0);
1445
    /* successfully delivered */
1446
    env->old_exception = -1;
1447
#endif
1448
    env = saved_env;
1449
}
1450

    
1451
void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
1452
{
1453
    CPUX86State *saved_env;
1454

    
1455
    saved_env = env;
1456
    env = env1;
1457
    do_interrupt_all(intno, 0, 0, 0, is_hw);
1458
    env = saved_env;
1459
}
1460

    
1461
/* SMM support */
1462

    
1463
#if defined(CONFIG_USER_ONLY)
1464

    
1465
void do_smm_enter(CPUX86State *env1)
1466
{
1467
}
1468

    
1469
void helper_rsm(void)
1470
{
1471
}
1472

    
1473
#else
1474

    
1475
#ifdef TARGET_X86_64
1476
#define SMM_REVISION_ID 0x00020064
1477
#else
1478
#define SMM_REVISION_ID 0x00020000
1479
#endif
1480

    
1481
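/* enter System Management Mode: save the CPU state into the SMRAM
   state save area at smbase + 0x8000, then reset the CPU to the SMM
   entry state (CS base = smbase, EIP = 0x8000, flat 4 GB segments) */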
void do_smm_enter(CPUX86State *env1)
1482
{
1483
    target_ulong sm_state;
1484
    SegmentCache *dt;
1485
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}

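/* RSM: leave SMM by reloading the CPU state that do_smm_enter() saved
   in the SMM state save area, then clear HF_SMM_MASK. */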
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

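/* Note that the helpers below raise #DE (EXCP00_DIVZ) both for a zero
   divisor and for a quotient that overflows the destination register. */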
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
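/* (real hardware raises #DE when the AAM immediate is 0; that case is
   not handled here) */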
void helper_aam(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf) {
            eflags |= CC_C;
        }
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

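/* Helper for ENTER when the nesting level is non-zero: copy the outer
   frame pointers from the old frame (through SS) and push t1, the value
   supplied by the ENTER translation. */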
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

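/* LLDT: load the LDT register from a GDT descriptor (16 bytes in long
   mode); a null selector simply marks the LDT as invalid. */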
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
2254
{
2255
    uint32_t e1, e2;
2256
    int cpl, dpl, rpl;
2257
    SegmentCache *dt;
2258
    int index;
2259
    target_ulong ptr;
2260

    
2261
    selector &= 0xffff;
2262
    cpl = env->hflags & HF_CPL_MASK;
2263
    if ((selector & 0xfffc) == 0) {
2264
        /* null selector case */
2265
        if (seg_reg == R_SS
2266
#ifdef TARGET_X86_64
2267
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2268
#endif
2269
            ) {
2270
            raise_exception_err(env, EXCP0D_GPF, 0);
2271
        }
2272
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2273
    } else {
2274

    
2275
        if (selector & 0x4) {
2276
            dt = &env->ldt;
2277
        } else {
2278
            dt = &env->gdt;
2279
        }
2280
        index = selector & ~7;
2281
        if ((index + 7) > dt->limit) {
2282
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2283
        }
2284
        ptr = dt->base + index;
2285
        e1 = ldl_kernel(ptr);
2286
        e2 = ldl_kernel(ptr + 4);
2287

    
2288
        if (!(e2 & DESC_S_MASK)) {
2289
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2290
        }
2291
        rpl = selector & 3;
2292
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2293
        if (seg_reg == R_SS) {
2294
            /* must be writable segment */
2295
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
2296
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2297
            }
2298
            if (rpl != cpl || dpl != cpl) {
2299
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2300
            }
2301
        } else {
2302
            /* must be readable segment */
2303
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
2304
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2305
            }
2306

    
2307
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2308
                /* if not conforming code, test rights */
2309
                if (dpl < cpl || dpl < rpl) {
2310
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2311
                }
2312
            }
2313
        }
2314

    
2315
        if (!(e2 & DESC_P_MASK)) {
2316
            if (seg_reg == R_SS) {
2317
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
2318
            } else {
2319
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2320
            }
2321
        }
2322

    
2323
        /* set the access bit if not already set */
2324
        if (!(e2 & DESC_A_MASK)) {
2325
            e2 |= DESC_A_MASK;
2326
            stl_kernel(ptr + 4, e2);
2327
        }
2328

    
2329
        cpu_x86_load_seg_cache(env, seg_reg, selector,
2330
                       get_seg_base(e1, e2),
2331
                       get_seg_limit(e1, e2),
2332
                       e2);
2333
#if 0
2334
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2335
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
2336
#endif
2337
    }
2338
}
2339

    
2340
/* protected mode jump */
2341
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2342
                           int next_eip_addend)
2343
{
2344
    int gate_cs, type;
2345
    uint32_t e1, e2, cpl, dpl, rpl, limit;
2346
    target_ulong next_eip;
2347

    
2348
    if ((new_cs & 0xfffc) == 0) {
2349
        raise_exception_err(env, EXCP0D_GPF, 0);
2350
    }
2351
    if (load_segment(&e1, &e2, new_cs) != 0) {
2352
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2353
    }
2354
    cpl = env->hflags & HF_CPL_MASK;
2355
    if (e2 & DESC_S_MASK) {
2356
        if (!(e2 & DESC_CS_MASK)) {
2357
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2358
        }
2359
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2360
        if (e2 & DESC_C_MASK) {
2361
            /* conforming code segment */
2362
            if (dpl > cpl) {
2363
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2364
            }
2365
        } else {
2366
            /* non conforming code segment */
2367
            rpl = new_cs & 3;
2368
            if (rpl > cpl) {
2369
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2370
            }
2371
            if (dpl != cpl) {
2372
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2373
            }
2374
        }
2375
        if (!(e2 & DESC_P_MASK)) {
2376
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2377
        }
2378
        limit = get_seg_limit(e1, e2);
2379
        if (new_eip > limit &&
2380
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
2381
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2382
        }
2383
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384
                       get_seg_base(e1, e2), limit, e2);
2385
        EIP = new_eip;
2386
    } else {
2387
        /* jump to call or task gate */
2388
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2389
        rpl = new_cs & 3;
2390
        cpl = env->hflags & HF_CPL_MASK;
2391
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2392
        switch (type) {
2393
        case 1: /* 286 TSS */
2394
        case 9: /* 386 TSS */
2395
        case 5: /* task gate */
2396
            if (dpl < cpl || dpl < rpl) {
2397
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2398
            }
2399
            next_eip = env->eip + next_eip_addend;
2400
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2401
            CC_OP = CC_OP_EFLAGS;
2402
            break;
2403
        case 4: /* 286 call gate */
2404
        case 12: /* 386 call gate */
2405
            if ((dpl < cpl) || (dpl < rpl)) {
2406
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2407
            }
2408
            if (!(e2 & DESC_P_MASK)) {
2409
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2410
            }
2411
            gate_cs = e1 >> 16;
2412
            new_eip = (e1 & 0xffff);
2413
            if (type == 12) {
2414
                new_eip |= (e2 & 0xffff0000);
2415
            }
2416
            if (load_segment(&e1, &e2, gate_cs) != 0) {
2417
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2418
            }
2419
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2420
            /* must be code segment */
2421
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2422
                 (DESC_S_MASK | DESC_CS_MASK))) {
2423
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2424
            }
2425
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2426
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
2427
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2428
            }
2429
            if (!(e2 & DESC_P_MASK)) {
2430
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2431
            }
2432
            limit = get_seg_limit(e1, e2);
2433
            if (new_eip > limit) {
2434
                raise_exception_err(env, EXCP0D_GPF, 0);
2435
            }
2436
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2437
                                   get_seg_base(e1, e2), limit, e2);
2438
            EIP = new_eip;
2439
            break;
2440
        default:
2441
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2442
            break;
2443
        }
2444
    }
2445
}
2446

    
2447
/* real mode call */
2448
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2449
                       int shift, int next_eip)
2450
{
2451
    int new_eip;
2452
    uint32_t esp, esp_mask;
2453
    target_ulong ssp;
2454

    
2455
    new_eip = new_eip1;
2456
    esp = ESP;
2457
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2458
    ssp = env->segs[R_SS].base;
2459
    if (shift) {
2460
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2461
        PUSHL(ssp, esp, esp_mask, next_eip);
2462
    } else {
2463
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2464
        PUSHW(ssp, esp, esp_mask, next_eip);
2465
    }
2466

    
2467
    SET_ESP(esp, esp_mask);
2468
    env->eip = new_eip;
2469
    env->segs[R_CS].selector = new_cs;
2470
    env->segs[R_CS].base = (new_cs << 4);
2471
}
2472

    
2473
/* protected mode call */
2474
void helper_lcall_protected(int new_cs, target_ulong new_eip,
2475
                            int shift, int next_eip_addend)
2476
{
2477
    int new_stack, i;
2478
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2479
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2480
    uint32_t val, limit, old_sp_mask;
2481
    target_ulong ssp, old_ssp, next_eip;
2482

    
2483
    next_eip = env->eip + next_eip_addend;
2484
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2485
    LOG_PCALL_STATE(env);
2486
    if ((new_cs & 0xfffc) == 0) {
2487
        raise_exception_err(env, EXCP0D_GPF, 0);
2488
    }
2489
    if (load_segment(&e1, &e2, new_cs) != 0) {
2490
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2491
    }
2492
    cpl = env->hflags & HF_CPL_MASK;
2493
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2494
    if (e2 & DESC_S_MASK) {
2495
        if (!(e2 & DESC_CS_MASK)) {
2496
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2497
        }
2498
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2499
        if (e2 & DESC_C_MASK) {
2500
            /* conforming code segment */
2501
            if (dpl > cpl) {
2502
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2503
            }
2504
        } else {
2505
            /* non conforming code segment */
2506
            rpl = new_cs & 3;
2507
            if (rpl > cpl) {
2508
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2509
            }
2510
            if (dpl != cpl) {
2511
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2512
            }
2513
        }
2514
        if (!(e2 & DESC_P_MASK)) {
2515
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2516
        }
2517

    
2518
#ifdef TARGET_X86_64
2519
        /* XXX: check 16/32 bit cases in long mode */
2520
        if (shift == 2) {
2521
            target_ulong rsp;
2522

    
2523
            /* 64 bit case */
2524
            rsp = ESP;
2525
            PUSHQ(rsp, env->segs[R_CS].selector);
2526
            PUSHQ(rsp, next_eip);
2527
            /* from this point, not restartable */
2528
            ESP = rsp;
2529
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2530
                                   get_seg_base(e1, e2),
2531
                                   get_seg_limit(e1, e2), e2);
2532
            EIP = new_eip;
2533
        } else
2534
#endif
2535
        {
2536
            sp = ESP;
2537
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2538
            ssp = env->segs[R_SS].base;
2539
            if (shift) {
2540
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2541
                PUSHL(ssp, sp, sp_mask, next_eip);
2542
            } else {
2543
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2544
                PUSHW(ssp, sp, sp_mask, next_eip);
2545
            }
2546

    
2547
            limit = get_seg_limit(e1, e2);
2548
            if (new_eip > limit) {
2549
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2550
            }
2551
            /* from this point, not restartable */
2552
            SET_ESP(sp, sp_mask);
2553
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2554
                                   get_seg_base(e1, e2), limit, e2);
2555
            EIP = new_eip;
2556
        }
2557
    } else {
2558
        /* check gate type */
2559
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2560
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2561
        rpl = new_cs & 3;
2562
        switch (type) {
2563
        case 1: /* available 286 TSS */
2564
        case 9: /* available 386 TSS */
2565
        case 5: /* task gate */
2566
            if (dpl < cpl || dpl < rpl) {
2567
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2568
            }
2569
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2570
            CC_OP = CC_OP_EFLAGS;
2571
            return;
2572
        case 4: /* 286 call gate */
2573
        case 12: /* 386 call gate */
2574
            break;
2575
        default:
2576
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2577
            break;
2578
        }
2579
        shift = type >> 3;
2580

    
2581
        if (dpl < cpl || dpl < rpl) {
2582
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2583
        }
2584
        /* check valid bit */
2585
        if (!(e2 & DESC_P_MASK)) {
2586
            raise_exception_err(env, EXCP0B_NOSEG,  new_cs & 0xfffc);
2587
        }
2588
        selector = e1 >> 16;
2589
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2590
        param_count = e2 & 0x1f;
2591
        if ((selector & 0xfffc) == 0) {
2592
            raise_exception_err(env, EXCP0D_GPF, 0);
2593
        }
2594

    
2595
        if (load_segment(&e1, &e2, selector) != 0) {
2596
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2597
        }
2598
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
2599
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2600
        }
2601
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2602
        if (dpl > cpl) {
2603
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2604
        }
2605
        if (!(e2 & DESC_P_MASK)) {
2606
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2607
        }
2608

    
2609
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2610
            /* to inner privilege */
2611
            get_ss_esp_from_tss(&ss, &sp, dpl);
2612
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
2613
                      "\n",
2614
                      ss, sp, param_count, ESP);
2615
            if ((ss & 0xfffc) == 0) {
2616
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2617
            }
2618
            if ((ss & 3) != dpl) {
2619
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2620
            }
2621
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
2622
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2623
            }
2624
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2625
            if (ss_dpl != dpl) {
2626
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2627
            }
2628
            if (!(ss_e2 & DESC_S_MASK) ||
2629
                (ss_e2 & DESC_CS_MASK) ||
2630
                !(ss_e2 & DESC_W_MASK)) {
2631
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2632
            }
2633
            if (!(ss_e2 & DESC_P_MASK)) {
2634
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2635
            }
2636

    
2637
            /* push_size = ((param_count * 2) + 8) << shift; */
2638

    
2639
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2640
            old_ssp = env->segs[R_SS].base;
2641

    
2642
            sp_mask = get_sp_mask(ss_e2);
2643
            ssp = get_seg_base(ss_e1, ss_e2);
2644
            if (shift) {
2645
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2646
                PUSHL(ssp, sp, sp_mask, ESP);
2647
                for (i = param_count - 1; i >= 0; i--) {
2648
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2649
                    PUSHL(ssp, sp, sp_mask, val);
2650
                }
2651
            } else {
2652
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2653
                PUSHW(ssp, sp, sp_mask, ESP);
2654
                for (i = param_count - 1; i >= 0; i--) {
2655
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2656
                    PUSHW(ssp, sp, sp_mask, val);
2657
                }
2658
            }
2659
            new_stack = 1;
2660
        } else {
2661
            /* to same privilege */
2662
            sp = ESP;
2663
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2664
            ssp = env->segs[R_SS].base;
2665
            /* push_size = (4 << shift); */
2666
            new_stack = 0;
2667
        }
2668

    
2669
        if (shift) {
2670
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2671
            PUSHL(ssp, sp, sp_mask, next_eip);
2672
        } else {
2673
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2674
            PUSHW(ssp, sp, sp_mask, next_eip);
2675
        }
2676

    
2677
        /* from this point, not restartable */
2678

    
2679
        if (new_stack) {
2680
            ss = (ss & ~3) | dpl;
2681
            cpu_x86_load_seg_cache(env, R_SS, ss,
2682
                                   ssp,
2683
                                   get_seg_limit(ss_e1, ss_e2),
2684
                                   ss_e2);
2685
        }
2686

    
2687
        selector = (selector & ~3) | dpl;
2688
        cpu_x86_load_seg_cache(env, R_CS, selector,
2689
                       get_seg_base(e1, e2),
2690
                       get_seg_limit(e1, e2),
2691
                       e2);
2692
        cpu_x86_set_cpl(env, dpl);
2693
        SET_ESP(sp, sp_mask);
2694
        EIP = offset;
2695
    }
2696
}
2697

    
2698
/* real and vm86 mode iret */
2699
void helper_iret_real(int shift)
2700
{
2701
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2702
    target_ulong ssp;
2703
    int eflags_mask;
2704

    
2705
    sp_mask = 0xffff; /* XXXX: use SS segment size? */
2706
    sp = ESP;
2707
    ssp = env->segs[R_SS].base;
2708
    if (shift == 1) {
2709
        /* 32 bits */
2710
        POPL(ssp, sp, sp_mask, new_eip);
2711
        POPL(ssp, sp, sp_mask, new_cs);
2712
        new_cs &= 0xffff;
2713
        POPL(ssp, sp, sp_mask, new_eflags);
2714
    } else {
2715
        /* 16 bits */
2716
        POPW(ssp, sp, sp_mask, new_eip);
2717
        POPW(ssp, sp, sp_mask, new_cs);
2718
        POPW(ssp, sp, sp_mask, new_eflags);
2719
    }
2720
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2721
    env->segs[R_CS].selector = new_cs;
2722
    env->segs[R_CS].base = (new_cs << 4);
2723
    env->eip = new_eip;
2724
    if (env->eflags & VM_MASK) {
2725
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2726
            NT_MASK;
2727
    } else {
2728
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2729
            RF_MASK | NT_MASK;
2730
    }
2731
    if (shift == 0) {
2732
        eflags_mask &= 0xffff;
2733
    }
2734
    load_eflags(new_eflags, eflags_mask);
2735
    env->hflags2 &= ~HF2_NMI_MASK;
2736
}
2737

    
2738
static inline void validate_seg(int seg_reg, int cpl)
2739
{
2740
    int dpl;
2741
    uint32_t e2;
2742

    
2743
    /* XXX: on x86_64, we do not want to nullify FS and GS because
2744
       they may still contain a valid base. I would be interested to
2745
       know how a real x86_64 CPU behaves */
2746
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
2747
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
2748
        return;
2749
    }
2750

    
2751
    e2 = env->segs[seg_reg].flags;
2752
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2753
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2754
        /* data or non conforming code segment */
2755
        if (dpl < cpl) {
2756
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2757
        }
2758
    }
2759
}
2760

    
2761
/* protected mode iret */
2762
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2763
{
2764
    uint32_t new_cs, new_eflags, new_ss;
2765
    uint32_t new_es, new_ds, new_fs, new_gs;
2766
    uint32_t e1, e2, ss_e1, ss_e2;
2767
    int cpl, dpl, rpl, eflags_mask, iopl;
2768
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2769

    
2770
#ifdef TARGET_X86_64
2771
    if (shift == 2) {
2772
        sp_mask = -1;
2773
    } else
2774
#endif
2775
    {
2776
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2777
    }
2778
    sp = ESP;
2779
    ssp = env->segs[R_SS].base;
2780
    new_eflags = 0; /* avoid warning */
2781
#ifdef TARGET_X86_64
2782
    if (shift == 2) {
2783
        POPQ(sp, new_eip);
2784
        POPQ(sp, new_cs);
2785
        new_cs &= 0xffff;
2786
        if (is_iret) {
2787
            POPQ(sp, new_eflags);
2788
        }
2789
    } else
2790
#endif
2791
    {
2792
        if (shift == 1) {
2793
            /* 32 bits */
2794
            POPL(ssp, sp, sp_mask, new_eip);
2795
            POPL(ssp, sp, sp_mask, new_cs);
2796
            new_cs &= 0xffff;
2797
            if (is_iret) {
2798
                POPL(ssp, sp, sp_mask, new_eflags);
2799
                if (new_eflags & VM_MASK) {
2800
                    goto return_to_vm86;
2801
                }
2802
            }
2803
        } else {
2804
            /* 16 bits */
2805
            POPW(ssp, sp, sp_mask, new_eip);
2806
            POPW(ssp, sp, sp_mask, new_cs);
2807
            if (is_iret) {
2808
                POPW(ssp, sp, sp_mask, new_eflags);
2809
            }
2810
        }
2811
    }
2812
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2813
              new_cs, new_eip, shift, addend);
2814
    LOG_PCALL_STATE(env);
2815
    if ((new_cs & 0xfffc) == 0) {
2816
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2817
    }
2818
    if (load_segment(&e1, &e2, new_cs) != 0) {
2819
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2820
    }
2821
    if (!(e2 & DESC_S_MASK) ||
2822
        !(e2 & DESC_CS_MASK)) {
2823
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2824
    }
2825
    cpl = env->hflags & HF_CPL_MASK;
2826
    rpl = new_cs & 3;
2827
    if (rpl < cpl) {
2828
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2829
    }
2830
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2831
    if (e2 & DESC_C_MASK) {
2832
        if (dpl > rpl) {
2833
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2834
        }
2835
    } else {
2836
        if (dpl != rpl) {
2837
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2838
        }
2839
    }
2840
    if (!(e2 & DESC_P_MASK)) {
2841
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2842
    }
2843

    
2844
    sp += addend;
2845
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2846
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2847
        /* return to same privilege level */
2848
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2849
                       get_seg_base(e1, e2),
2850
                       get_seg_limit(e1, e2),
2851
                       e2);
2852
    } else {
2853
        /* return to different privilege level */
2854
#ifdef TARGET_X86_64
2855
        if (shift == 2) {
2856
            POPQ(sp, new_esp);
2857
            POPQ(sp, new_ss);
2858
            new_ss &= 0xffff;
2859
        } else
2860
#endif
2861
        {
2862
            if (shift == 1) {
2863
                /* 32 bits */
2864
                POPL(ssp, sp, sp_mask, new_esp);
2865
                POPL(ssp, sp, sp_mask, new_ss);
2866
                new_ss &= 0xffff;
2867
            } else {
2868
                /* 16 bits */
2869
                POPW(ssp, sp, sp_mask, new_esp);
2870
                POPW(ssp, sp, sp_mask, new_ss);
2871
            }
2872
        }
2873
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2874
                  new_ss, new_esp);
2875
        if ((new_ss & 0xfffc) == 0) {
2876
#ifdef TARGET_X86_64
2877
            /* NULL ss is allowed in long mode if cpl != 3 */
2878
            /* XXX: test CS64? */
2879
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2880
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2881
                                       0, 0xffffffff,
2882
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2883
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2884
                                       DESC_W_MASK | DESC_A_MASK);
2885
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2886
            } else
2887
#endif
2888
            {
2889
                raise_exception_err(env, EXCP0D_GPF, 0);
2890
            }
2891
        } else {
2892
            if ((new_ss & 3) != rpl) {
2893
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2894
            }
2895
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
2896
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2897
            }
2898
            if (!(ss_e2 & DESC_S_MASK) ||
2899
                (ss_e2 & DESC_CS_MASK) ||
2900
                !(ss_e2 & DESC_W_MASK)) {
2901
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2902
            }
2903
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2904
            if (dpl != rpl) {
2905
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2906
            }
2907
            if (!(ss_e2 & DESC_P_MASK)) {
2908
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2909
            }
2910
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2911
                                   get_seg_base(ss_e1, ss_e2),
2912
                                   get_seg_limit(ss_e1, ss_e2),
2913
                                   ss_e2);
2914
        }
2915

    
2916
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2917
                       get_seg_base(e1, e2),
2918
                       get_seg_limit(e1, e2),
2919
                       e2);
2920
        cpu_x86_set_cpl(env, rpl);
2921
        sp = new_esp;
2922
#ifdef TARGET_X86_64
2923
        if (env->hflags & HF_CS64_MASK) {
2924
            sp_mask = -1;
2925
        } else
2926
#endif
2927
        {
2928
            sp_mask = get_sp_mask(ss_e2);
2929
        }
2930

    
2931
        /* validate data segments */
2932
        validate_seg(R_ES, rpl);
2933
        validate_seg(R_DS, rpl);
2934
        validate_seg(R_FS, rpl);
2935
        validate_seg(R_GS, rpl);
2936

    
2937
        sp += addend;
2938
    }
2939
    SET_ESP(sp, sp_mask);
2940
    env->eip = new_eip;
2941
    if (is_iret) {
2942
        /* NOTE: 'cpl' is the _old_ CPL */
2943
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2944
        if (cpl == 0) {
2945
            eflags_mask |= IOPL_MASK;
2946
        }
2947
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2948
        if (cpl <= iopl) {
2949
            eflags_mask |= IF_MASK;
2950
        }
2951
        if (shift == 0) {
2952
            eflags_mask &= 0xffff;
2953
        }
2954
        load_eflags(new_eflags, eflags_mask);
2955
    }
2956
    return;
2957

    
2958
 return_to_vm86:
2959
    POPL(ssp, sp, sp_mask, new_esp);
2960
    POPL(ssp, sp, sp_mask, new_ss);
2961
    POPL(ssp, sp, sp_mask, new_es);
2962
    POPL(ssp, sp, sp_mask, new_ds);
2963
    POPL(ssp, sp, sp_mask, new_fs);
2964
    POPL(ssp, sp, sp_mask, new_gs);
2965

    
2966
    /* modify processor state */
2967
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2968
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2969
    load_seg_vm(R_CS, new_cs & 0xffff);
2970
    cpu_x86_set_cpl(env, 3);
2971
    load_seg_vm(R_SS, new_ss & 0xffff);
2972
    load_seg_vm(R_ES, new_es & 0xffff);
2973
    load_seg_vm(R_DS, new_ds & 0xffff);
2974
    load_seg_vm(R_FS, new_fs & 0xffff);
2975
    load_seg_vm(R_GS, new_gs & 0xffff);
2976

    
2977
    env->eip = new_eip & 0xffff;
2978
    ESP = new_esp;
2979
}
2980

    
2981
void helper_iret_protected(int shift, int next_eip)
2982
{
2983
    int tss_selector, type;
2984
    uint32_t e1, e2;
2985

    
2986
    /* specific case for TSS */
2987
    if (env->eflags & NT_MASK) {
2988
#ifdef TARGET_X86_64
2989
        if (env->hflags & HF_LMA_MASK) {
2990
            raise_exception_err(env, EXCP0D_GPF, 0);
2991
        }
2992
#endif
2993
        tss_selector = lduw_kernel(env->tr.base + 0);
2994
        if (tss_selector & 4) {
2995
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2996
        }
2997
        if (load_segment(&e1, &e2, tss_selector) != 0) {
2998
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2999
        }
3000
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3001
        /* NOTE: we check both segment and busy TSS */
3002
        if (type != 3) {
3003
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3004
        }
3005
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3006
    } else {
3007
        helper_ret_protected(shift, 1, 0);
3008
    }
3009
    env->hflags2 &= ~HF2_NMI_MASK;
3010
}
3011

    
3012
void helper_lret_protected(int shift, int addend)
3013
{
3014
    helper_ret_protected(shift, 0, addend);
3015
}
3016

    
3017
void helper_sysenter(void)
3018
{
3019
    if (env->sysenter_cs == 0) {
3020
        raise_exception_err(env, EXCP0D_GPF, 0);
3021
    }
3022
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3023
    cpu_x86_set_cpl(env, 0);
3024

    
3025
#ifdef TARGET_X86_64
3026
    if (env->hflags & HF_LMA_MASK) {
3027
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3028
                               0, 0xffffffff,
3029
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3030
                               DESC_S_MASK |
3031
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
3032
                               DESC_L_MASK);
3033
    } else
3034
#endif
3035
    {
3036
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3037
                               0, 0xffffffff,
3038
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3039
                               DESC_S_MASK |
3040
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3041
    }
3042
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3043
                           0, 0xffffffff,
3044
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3045
                           DESC_S_MASK |
3046
                           DESC_W_MASK | DESC_A_MASK);
3047
    ESP = env->sysenter_esp;
3048
    EIP = env->sysenter_eip;
3049
}
3050

    
3051
void helper_sysexit(int dflag)
3052
{
3053
    int cpl;
3054

    
3055
    cpl = env->hflags & HF_CPL_MASK;
3056
    if (env->sysenter_cs == 0 || cpl != 0) {
3057
        raise_exception_err(env, EXCP0D_GPF, 0);
3058
    }
3059
    cpu_x86_set_cpl(env, 3);
3060
#ifdef TARGET_X86_64
3061
    if (dflag == 2) {
3062
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
3063
                               3, 0, 0xffffffff,
3064
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3065
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3066
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
3067
                               DESC_L_MASK);
3068
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
3069
                               3, 0, 0xffffffff,
3070
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3071
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3072
                               DESC_W_MASK | DESC_A_MASK);
3073
    } else
3074
#endif
3075
    {
3076
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
3077
                               3, 0, 0xffffffff,
3078
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3079
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3080
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3081
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
3082
                               3, 0, 0xffffffff,
3083
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3084
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3085
                               DESC_W_MASK | DESC_A_MASK);
3086
    }
3087
    ESP = ECX;
3088
    EIP = EDX;
3089
}
3090

    
3091
#if defined(CONFIG_USER_ONLY)
3092
target_ulong helper_read_crN(int reg)
3093
{
3094
    return 0;
3095
}
3096

    
3097
void helper_write_crN(int reg, target_ulong t0)
3098
{
3099
}
3100

    
3101
void helper_movl_drN_T0(int reg, target_ulong t0)
3102
{
3103
}
3104
#else
3105
target_ulong helper_read_crN(int reg)
3106
{
3107
    target_ulong val;
3108

    
3109
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3110
    switch (reg) {
3111
    default:
3112
        val = env->cr[reg];
3113
        break;
3114
    case 8:
3115
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3116
            val = cpu_get_apic_tpr(env->apic_state);
3117
        } else {
3118
            val = env->v_tpr;
3119
        }
3120
        break;
3121
    }
3122
    return val;
3123
}
3124

    
3125
void helper_write_crN(int reg, target_ulong t0)
3126
{
3127
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3128
    switch (reg) {
3129
    case 0:
3130
        cpu_x86_update_cr0(env, t0);
3131
        break;
3132
    case 3:
3133
        cpu_x86_update_cr3(env, t0);
3134
        break;
3135
    case 4:
3136
        cpu_x86_update_cr4(env, t0);
3137
        break;
3138
    case 8:
3139
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3140
            cpu_set_apic_tpr(env->apic_state, t0);
3141
        }
3142
        env->v_tpr = t0 & 0x0f;
3143
        break;
3144
    default:
3145
        env->cr[reg] = t0;
3146
        break;
3147
    }
3148
}
3149

    
3150
void helper_movl_drN_T0(int reg, target_ulong t0)
3151
{
3152
    int i;
3153

    
3154
    if (reg < 4) {
3155
        hw_breakpoint_remove(env, reg);
3156
        env->dr[reg] = t0;
3157
        hw_breakpoint_insert(env, reg);
3158
    } else if (reg == 7) {
3159
        for (i = 0; i < 4; i++) {
3160
            hw_breakpoint_remove(env, i);
3161
        }
3162
        env->dr[7] = t0;
3163
        for (i = 0; i < 4; i++) {
3164
            hw_breakpoint_insert(env, i);
3165
        }
3166
    } else {
3167
        env->dr[reg] = t0;
3168
    }
3169
}
3170
#endif
3171

    
3172
void helper_lmsw(target_ulong t0)
3173
{
3174
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3175
       if already set to one. */
3176
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3177
    helper_write_crN(0, t0);
3178
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}

#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}
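
/* Illustrative sketch (not built, hence the #if 0): the MSR_EFER case above
   only lets the guest toggle EFER bits whose matching CPUID feature is
   exposed; bits outside update_mask silently keep their previous value. */
#if 0
static uint64_t efer_apply_update_mask_example(uint64_t old_efer,
                                               uint64_t guest_val,
                                               uint64_t update_mask)
{
    /* e.g. update_mask = MSR_EFER_SCE | MSR_EFER_LME | MSR_EFER_NXE:
       an attempt to set MSR_EFER_SVME without CPUID_EXT3_SVM is ignored. */
    return (old_efer & ~update_mask) | (guest_val & update_mask);
}
#endif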

void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
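
/* Illustrative sketch (not built): the MTRR cases above rely on the usual
   MSR layout where MSR_MTRRphysBase(n) and MSR_MTRRphysMask(n) are
   interleaved (base0, mask0, base1, mask1, ...), so dividing the offset
   from the first MSR of the group by two yields the mtrr_var[] slot. */
#if 0
static void mtrr_var_index_example(void)
{
    /* MSR_MTRRphysMask(3) sits 6 MSRs after MSR_MTRRphysMask(0) because
       bases and masks interleave; 6 / 2 == 3 selects env->mtrr_var[3].mask. */
    uint32_t offset = MSR_MTRRphysMask(3) - MSR_MTRRphysMask(0);   /* 6 */
    uint32_t index = offset / 2;                                   /* 3 */
    (void)index;
}
#endif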
3520

    
3521
target_ulong helper_lsl(target_ulong selector1)
3522
{
3523
    unsigned int limit;
3524
    uint32_t e1, e2, eflags, selector;
3525
    int rpl, dpl, cpl, type;
3526

    
3527
    selector = selector1 & 0xffff;
3528
    eflags = helper_cc_compute_all(CC_OP);
3529
    if ((selector & 0xfffc) == 0) {
3530
        goto fail;
3531
    }
3532
    if (load_segment(&e1, &e2, selector) != 0) {
3533
        goto fail;
3534
    }
3535
    rpl = selector & 3;
3536
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3537
    cpl = env->hflags & HF_CPL_MASK;
3538
    if (e2 & DESC_S_MASK) {
3539
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3540
            /* conforming */
3541
        } else {
3542
            if (dpl < cpl || dpl < rpl) {
3543
                goto fail;
3544
            }
3545
        }
3546
    } else {
3547
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3548
        switch (type) {
3549
        case 1:
3550
        case 2:
3551
        case 3:
3552
        case 9:
3553
        case 11:
3554
            break;
3555
        default:
3556
            goto fail;
3557
        }
3558
        if (dpl < cpl || dpl < rpl) {
3559
        fail:
3560
            CC_SRC = eflags & ~CC_Z;
3561
            return 0;
3562
        }
3563
    }
3564
    limit = get_seg_limit(e1, e2);
3565
    CC_SRC = eflags | CC_Z;
3566
    return limit;
3567
}
3568

    
3569
target_ulong helper_lar(target_ulong selector1)
3570
{
3571
    uint32_t e1, e2, eflags, selector;
3572
    int rpl, dpl, cpl, type;
3573

    
3574
    selector = selector1 & 0xffff;
3575
    eflags = helper_cc_compute_all(CC_OP);
3576
    if ((selector & 0xfffc) == 0) {
3577
        goto fail;
3578
    }
3579
    if (load_segment(&e1, &e2, selector) != 0) {
3580
        goto fail;
3581
    }
3582
    rpl = selector & 3;
3583
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3584
    cpl = env->hflags & HF_CPL_MASK;
3585
    if (e2 & DESC_S_MASK) {
3586
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3587
            /* conforming */
3588
        } else {
3589
            if (dpl < cpl || dpl < rpl) {
3590
                goto fail;
3591
            }
3592
        }
3593
    } else {
3594
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3595
        switch (type) {
3596
        case 1:
3597
        case 2:
3598
        case 3:
3599
        case 4:
3600
        case 5:
3601
        case 9:
3602
        case 11:
3603
        case 12:
3604
            break;
3605
        default:
3606
            goto fail;
3607
        }
3608
        if (dpl < cpl || dpl < rpl) {
3609
        fail:
3610
            CC_SRC = eflags & ~CC_Z;
3611
            return 0;
3612
        }
3613
    }
3614
    CC_SRC = eflags | CC_Z;
3615
    return e2 & 0x00f0ff00;
3616
}
3617

    
3618
void helper_verr(target_ulong selector1)
3619
{
3620
    uint32_t e1, e2, eflags, selector;
3621
    int rpl, dpl, cpl;
3622

    
3623
    selector = selector1 & 0xffff;
3624
    eflags = helper_cc_compute_all(CC_OP);
3625
    if ((selector & 0xfffc) == 0) {
3626
        goto fail;
3627
    }
3628
    if (load_segment(&e1, &e2, selector) != 0) {
3629
        goto fail;
3630
    }
3631
    if (!(e2 & DESC_S_MASK)) {
3632
        goto fail;
3633
    }
3634
    rpl = selector & 3;
3635
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3636
    cpl = env->hflags & HF_CPL_MASK;
3637
    if (e2 & DESC_CS_MASK) {
3638
        if (!(e2 & DESC_R_MASK)) {
3639
            goto fail;
3640
        }
3641
        if (!(e2 & DESC_C_MASK)) {
3642
            if (dpl < cpl || dpl < rpl) {
3643
                goto fail;
3644
            }
3645
        }
3646
    } else {
3647
        if (dpl < cpl || dpl < rpl) {
3648
        fail:
3649
            CC_SRC = eflags & ~CC_Z;
3650
            return;
3651
        }
3652
    }
3653
    CC_SRC = eflags | CC_Z;
3654
}
3655

    
3656
void helper_verw(target_ulong selector1)
3657
{
3658
    uint32_t e1, e2, eflags, selector;
3659
    int rpl, dpl, cpl;
3660

    
3661
    selector = selector1 & 0xffff;
3662
    eflags = helper_cc_compute_all(CC_OP);
3663
    if ((selector & 0xfffc) == 0) {
3664
        goto fail;
3665
    }
3666
    if (load_segment(&e1, &e2, selector) != 0) {
3667
        goto fail;
3668
    }
3669
    if (!(e2 & DESC_S_MASK)) {
3670
        goto fail;
3671
    }
3672
    rpl = selector & 3;
3673
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3674
    cpl = env->hflags & HF_CPL_MASK;
3675
    if (e2 & DESC_CS_MASK) {
3676
        goto fail;
3677
    } else {
3678
        if (dpl < cpl || dpl < rpl) {
3679
            goto fail;
3680
        }
3681
        if (!(e2 & DESC_W_MASK)) {
3682
        fail:
3683
            CC_SRC = eflags & ~CC_Z;
3684
            return;
3685
        }
3686
    }
3687
    CC_SRC = eflags | CC_Z;
3688
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
#endif

#ifdef TARGET_X86_64
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a) {
        (*phigh)++;
    }
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b) {
            return 1;
        }
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
               ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
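
/* Illustrative sketch (not built): the shift/subtract loop above produces
   the same quotient and remainder as a native 128-by-64 division, assuming
   the compiler provides unsigned __int128 (used here only for checking). */
#if 0
static void div64_check_example(uint64_t hi, uint64_t lo, uint64_t b)
{
    unsigned __int128 a = ((unsigned __int128)hi << 64) | lo;
    uint64_t plow = lo, phigh = hi;

    /* hi < b guarantees the quotient fits in 64 bits, so div64() returns 0 */
    if (hi < b && div64(&plow, &phigh, b) == 0) {
        assert(plow == (uint64_t)(a / b));     /* plow holds the quotient */
        assert(phigh == (uint64_t)(a % b));    /* phigh holds the remainder */
    }
}
#endif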

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa) {
        neg128(plow, phigh);
    }
    sb = (b < 0);
    if (sb) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (sa ^ sb) {
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    if (sa) {
        *phigh = -*phigh;
    }
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0)) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(env, EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}
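
/* Illustrative sketch (not built): the BOUND helpers above fetch a signed
   lower/upper pair from memory and raise #BR only when the index lies
   outside the inclusive [low, high] range. */
#if 0
static int bound_would_fault_example(int32_t low, int32_t high, int32_t index)
{
    /* mirrors helper_boundl() without the memory access or the exception */
    return index < low || index > high;    /* non-zero would mean #BR */
}
#endif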
3974

    
3975
#if !defined(CONFIG_USER_ONLY)
3976

    
3977
#define MMUSUFFIX _mmu
3978

    
3979
#define SHIFT 0
3980
#include "softmmu_template.h"
3981

    
3982
#define SHIFT 1
3983
#include "softmmu_template.h"
3984

    
3985
#define SHIFT 2
3986
#include "softmmu_template.h"
3987

    
3988
#define SHIFT 3
3989
#include "softmmu_template.h"
3990

    
3991
#endif
3992

    
3993
#if !defined(CONFIG_USER_ONLY)
3994
/* try to fill the TLB and return an exception if error. If retaddr is
3995
   NULL, it means that the function was called in C code (i.e. not
3996
   from generated code or from helper.c) */
3997
/* XXX: fix it to restore all registers */
3998
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
3999
              uintptr_t retaddr)
4000
{
4001
    TranslationBlock *tb;
4002
    int ret;
4003
    CPUX86State *saved_env;
4004

    
4005
    saved_env = env;
4006
    env = env1;
4007

    
4008
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
4009
    if (ret) {
4010
        if (retaddr) {
4011
            /* now we have a real cpu fault */
4012
            tb = tb_find_pc(retaddr);
4013
            if (tb) {
4014
                /* the PC is inside the translated code. It means that we have
4015
                   a virtual CPU fault */
4016
                cpu_restore_state(tb, env, retaddr);
4017
            }
4018
        }
4019
        raise_exception_err(env, env->exception_index, env->error_code);
4020
    }
4021
    env = saved_env;
4022
}
4023
#endif
4024

    
4025
/* Secure Virtual Machine helpers */
4026

    
4027
#if defined(CONFIG_USER_ONLY)
4028

    
4029
void helper_vmrun(int aflag, int next_eip_addend)
4030
{
4031
}
4032

    
4033
void helper_vmmcall(void)
4034
{
4035
}
4036

    
4037
void helper_vmload(int aflag)
4038
{
4039
}
4040

    
4041
void helper_vmsave(int aflag)
4042
{
4043
}
4044

    
4045
void helper_stgi(void)
4046
{
4047
}
4048

    
4049
void helper_clgi(void)
4050
{
4051
}
4052

    
4053
void helper_skinit(void)
4054
{
4055
}
4056

    
4057
void helper_invlpga(int aflag)
4058
{
4059
}
4060

    
4061
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4062
{
4063
}
4064

    
4065
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
4066
{
4067
}
4068

    
4069
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4070
{
4071
}
4072

    
4073
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
4074
                                   uint64_t param)
4075
{
4076
}
4077

    
4078
void helper_svm_check_io(uint32_t port, uint32_t param,
4079
                         uint32_t next_eip_addend)
4080
{
4081
}
4082
#else
4083

    
4084
static inline void svm_save_seg(target_phys_addr_t addr,
4085
                                const SegmentCache *sc)
4086
{
4087
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
4088
             sc->selector);
4089
    stq_phys(addr + offsetof(struct vmcb_seg, base),
4090
             sc->base);
4091
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
4092
             sc->limit);
4093
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4094
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4095
}
4096

    
4097
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4098
{
4099
    unsigned int flags;
4100

    
4101
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4102
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4103
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4104
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4105
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4106
}
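
/* Illustrative sketch (not built): svm_save_seg() and svm_load_seg() above
   convert between the QEMU segment-cache flags (attribute bits 8..15 and
   20..23) and the packed 12-bit VMCB attrib field; for those bits the two
   conversions round-trip exactly. */
#if 0
static void vmcb_attrib_roundtrip_example(uint32_t flags)
{
    /* keep only the bits the VMCB attrib field can represent */
    uint32_t kept = flags & ((0xff << 8) | (0x0f << 20));
    uint16_t attrib = ((kept >> 8) & 0xff) | ((kept >> 12) & 0x0f00);
    uint32_t back = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);

    assert(back == kept);
}
#endif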
4107

    
4108
static inline void svm_load_seg_cache(target_phys_addr_t addr,
4109
                                      CPUX86State *env, int seg_reg)
4110
{
4111
    SegmentCache sc1, *sc = &sc1;
4112

    
4113
    svm_load_seg(addr, sc);
4114
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4115
                           sc->base, sc->limit, sc->flags);
4116
}
4117

    
4118
void helper_vmrun(int aflag, int next_eip_addend)
4119
{
4120
    target_ulong addr;
4121
    uint32_t event_inj;
4122
    uint32_t int_ctl;
4123

    
4124
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4125

    
4126
    if (aflag == 2) {
4127
        addr = EAX;
4128
    } else {
4129
        addr = (uint32_t)EAX;
4130
    }
4131

    
4132
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4133

    
4134
    env->vm_vmcb = addr;
4135

    
4136
    /* save the current CPU state in the hsave page */
4137
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
4138
             env->gdt.base);
4139
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
4140
             env->gdt.limit);
4141

    
4142
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
4143
             env->idt.base);
4144
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
4145
             env->idt.limit);
4146

    
4147
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4148
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4149
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4150
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4151
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4152
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4153

    
4154
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4155
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
4156
             compute_eflags());
4157

    
4158
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4159
                 &env->segs[R_ES]);
4160
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4161
                 &env->segs[R_CS]);
4162
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4163
                 &env->segs[R_SS]);
4164
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4165
                 &env->segs[R_DS]);
4166

    
4167
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4168
             EIP + next_eip_addend);
4169
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4170
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4171

    
4172
    /* load the interception bitmaps so we do not need to access the
4173
       vmcb in svm mode */
4174
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4175
                                                      control.intercept));
4176
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
4177
                                       offsetof(struct vmcb,
4178
                                                control.intercept_cr_read));
4179
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
4180
                                        offsetof(struct vmcb,
4181
                                                 control.intercept_cr_write));
4182
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
4183
                                       offsetof(struct vmcb,
4184
                                                control.intercept_dr_read));
4185
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
4186
                                        offsetof(struct vmcb,
4187
                                                 control.intercept_dr_write));
4188
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
4189
                                         offsetof(struct vmcb,
4190
                                                  control.intercept_exceptions
4191
                                                  ));
4192

    
4193
    /* enable intercepts */
4194
    env->hflags |= HF_SVMI_MASK;
4195

    
4196
    env->tsc_offset = ldq_phys(env->vm_vmcb +
4197
                               offsetof(struct vmcb, control.tsc_offset));
4198

    
4199
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4200
                                                      save.gdtr.base));
4201
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4202
                                                      save.gdtr.limit));
4203

    
4204
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4205
                                                      save.idtr.base));
4206
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4207
                                                      save.idtr.limit));
4208

    
4209
    /* clear exit_info_2 so we behave like the real hardware */
4210
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4211

    
4212
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4213
                                                             save.cr0)));
4214
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4215
                                                             save.cr4)));
4216
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4217
                                                             save.cr3)));
4218
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4219
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4220
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4221
    if (int_ctl & V_INTR_MASKING_MASK) {
4222
        env->v_tpr = int_ctl & V_TPR_MASK;
4223
        env->hflags2 |= HF2_VINTR_MASK;
4224
        if (env->eflags & IF_MASK) {
4225
            env->hflags2 |= HF2_HIF_MASK;
4226
        }
4227
    }
4228

    
4229
    cpu_load_efer(env,
4230
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4231
    env->eflags = 0;
4232
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4233
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4234
    CC_OP = CC_OP_EFLAGS;
4235

    
4236
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4237
                       env, R_ES);
4238
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4239
                       env, R_CS);
4240
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4241
                       env, R_SS);
4242
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4243
                       env, R_DS);
4244

    
4245
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4246
    env->eip = EIP;
4247
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4248
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4249
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4250
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4251
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
4252
                                                           save.cpl)));
4253

    
4254
    /* FIXME: guest state consistency checks */
4255

    
4256
    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4257
    case TLB_CONTROL_DO_NOTHING:
4258
        break;
4259
    case TLB_CONTROL_FLUSH_ALL_ASID:
4260
        /* FIXME: this is not 100% correct but should work for now */
4261
        tlb_flush(env, 1);
4262
        break;
4263
    }
4264

    
4265
    env->hflags2 |= HF2_GIF_MASK;
4266

    
4267
    if (int_ctl & V_IRQ_MASK) {
4268
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4269
    }
4270

    
4271
    /* maybe we need to inject an event */
4272
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4273
                                                 control.event_inj));
4274
    if (event_inj & SVM_EVTINJ_VALID) {
4275
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4276
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4277
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
4278
                                          offsetof(struct vmcb,
4279
                                                   control.event_inj_err));
4280

    
4281
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4282
        /* FIXME: need to implement valid_err */
4283
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4284
        case SVM_EVTINJ_TYPE_INTR:
4285
            env->exception_index = vector;
4286
            env->error_code = event_inj_err;
4287
            env->exception_is_int = 0;
4288
            env->exception_next_eip = -1;
4289
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4290
            /* XXX: is it always correct? */
4291
            do_interrupt_x86_hardirq(env, vector, 1);
4292
            break;
4293
        case SVM_EVTINJ_TYPE_NMI:
4294
            env->exception_index = EXCP02_NMI;
4295
            env->error_code = event_inj_err;
4296
            env->exception_is_int = 0;
4297
            env->exception_next_eip = EIP;
4298
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
4299
            cpu_loop_exit(env);
4300
            break;
4301
        case SVM_EVTINJ_TYPE_EXEPT:
4302
            env->exception_index = vector;
4303
            env->error_code = event_inj_err;
4304
            env->exception_is_int = 0;
4305
            env->exception_next_eip = -1;
4306
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
4307
            cpu_loop_exit(env);
4308
            break;
4309
        case SVM_EVTINJ_TYPE_SOFT:
4310
            env->exception_index = vector;
4311
            env->error_code = event_inj_err;
4312
            env->exception_is_int = 1;
4313
            env->exception_next_eip = EIP;
4314
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
4315
            cpu_loop_exit(env);
4316
            break;
4317
        }
4318
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
4319
                      env->error_code);
4320
    }
4321
}
4322

    
4323
void helper_vmmcall(void)
4324
{
4325
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4326
    raise_exception(env, EXCP06_ILLOP);
4327
}
4328

    
4329
void helper_vmload(int aflag)
4330
{
4331
    target_ulong addr;
4332

    
4333
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4334

    
4335
    if (aflag == 2) {
4336
        addr = EAX;
4337
    } else {
4338
        addr = (uint32_t)EAX;
4339
    }
4340

    
4341
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
4342
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4343
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4344
                  env->segs[R_FS].base);
4345

    
4346
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4347
                       env, R_FS);
4348
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4349
                       env, R_GS);
4350
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4351
                 &env->tr);
4352
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4353
                 &env->ldt);
4354

    
4355
#ifdef TARGET_X86_64
4356
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
4357
                                                 save.kernel_gs_base));
4358
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4359
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4360
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4361
#endif
4362
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4363
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4364
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
4365
                                                 save.sysenter_esp));
4366
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
4367
                                                 save.sysenter_eip));
4368
}
4369

    
4370
void helper_vmsave(int aflag)
4371
{
4372
    target_ulong addr;
4373

    
4374
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
4375

    
4376
    if (aflag == 2) {
4377
        addr = EAX;
4378
    } else {
4379
        addr = (uint32_t)EAX;
4380
    }
4381

    
4382
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
4383
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4384
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4385
                  env->segs[R_FS].base);
4386

    
4387
    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
4388
                 &env->segs[R_FS]);
4389
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
4390
                 &env->segs[R_GS]);
4391
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
4392
                 &env->tr);
4393
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
4394
                 &env->ldt);
4395

    
4396
#ifdef TARGET_X86_64
4397
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
4398
             env->kernelgsbase);
4399
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4400
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4401
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4402
#endif
4403
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4404
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4405
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
4406
             env->sysenter_esp);
4407
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
4408
             env->sysenter_eip);
4409
}
4410

    
4411
void helper_stgi(void)
4412
{
4413
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
4414
    env->hflags2 |= HF2_GIF_MASK;
4415
}
4416

    
4417
void helper_clgi(void)
4418
{
4419
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
4420
    env->hflags2 &= ~HF2_GIF_MASK;
4421
}
4422

    
4423
void helper_skinit(void)
4424
{
4425
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
4426
    /* XXX: not implemented */
4427
    raise_exception(env, EXCP06_ILLOP);
4428
}
4429

    
4430
void helper_invlpga(int aflag)
4431
{
4432
    target_ulong addr;
4433

    
4434
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
4435

    
4436
    if (aflag == 2) {
4437
        addr = EAX;
4438
    } else {
4439
        addr = (uint32_t)EAX;
4440
    }
4441

    
4442
    /* XXX: could use the ASID to see if it is needed to do the
4443
       flush */
4444
    tlb_flush_page(env, addr);
4445
}
4446

    
4447
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4448
{
4449
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
4450
        return;
4451
    }
4452
    switch (type) {
4453
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4454
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
4455
            helper_vmexit(type, param);
4456
        }
4457
        break;
4458
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4459
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
4460
            helper_vmexit(type, param);
4461
        }
4462
        break;
4463
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
4464
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
4465
            helper_vmexit(type, param);
4466
        }
4467
        break;
4468
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
4469
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
4470
            helper_vmexit(type, param);
4471
        }
4472
        break;
4473
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
4474
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
4475
            helper_vmexit(type, param);
4476
        }
4477
        break;
4478
    case SVM_EXIT_MSR:
4479
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
4480
            /* FIXME: this should be read in at vmrun (faster this way?) */
4481
            uint64_t addr = ldq_phys(env->vm_vmcb +
4482
                                     offsetof(struct vmcb,
4483
                                              control.msrpm_base_pa));
4484
            uint32_t t0, t1;
4485

    
4486
            switch ((uint32_t)ECX) {
4487
            case 0 ... 0x1fff:
4488
                t0 = (ECX * 2) % 8;
4489
                t1 = (ECX * 2) / 8;
4490
                break;
4491
            case 0xc0000000 ... 0xc0001fff:
4492
                t0 = (8192 + ECX - 0xc0000000) * 2;
4493
                t1 = (t0 / 8);
4494
                t0 %= 8;
4495
                break;
4496
            case 0xc0010000 ... 0xc0011fff:
4497
                t0 = (16384 + ECX - 0xc0010000) * 2;
4498
                t1 = (t0 / 8);
4499
                t0 %= 8;
4500
                break;
4501
            default:
4502
                helper_vmexit(type, param);
4503
                t0 = 0;
4504
                t1 = 0;
4505
                break;
4506
            }
4507
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
4508
                helper_vmexit(type, param);
4509
            }
4510
        }
4511
        break;
4512
    default:
4513
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
4514
            helper_vmexit(type, param);
4515
        }
4516
        break;
4517
    }
4518
}
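
/* Illustrative sketch (not built): how the MSR permission-bitmap lookup
   above finds its intercept bits.  Every MSR owns two adjacent bits (read,
   then write), and the 0xc0000000.. range starts at bit offset 8192 * 2. */
#if 0
static void msrpm_offset_example(void)
{
    uint32_t msr = 0xc0000080;                      /* EFER */
    uint32_t bitpos = (8192 + msr - 0xc0000000) * 2;
    uint32_t byte_offset = bitpos / 8;              /* 2080 */
    uint32_t bit_in_byte = bitpos % 8;              /* 0 */

    /* read intercept: bit 'bit_in_byte'; write intercept: the next bit up,
       which is what ((1 << param) << t0) selects with param 0 or 1. */
    (void)byte_offset;
    (void)bit_in_byte;
}
#endif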
4519

    
4520
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
4521
                                   uint64_t param)
4522
{
4523
    CPUX86State *saved_env;
4524

    
4525
    saved_env = env;
4526
    env = env1;
4527
    helper_svm_check_intercept_param(type, param);
4528
    env = saved_env;
4529
}
4530

    
4531
void helper_svm_check_io(uint32_t port, uint32_t param,
4532
                         uint32_t next_eip_addend)
4533
{
4534
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
4535
        /* FIXME: this should be read in at vmrun (faster this way?) */
4536
        uint64_t addr = ldq_phys(env->vm_vmcb +
4537
                                 offsetof(struct vmcb, control.iopm_base_pa));
4538
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4539

    
4540
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
4541
            /* next EIP */
4542
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
4543
                     env->eip + next_eip_addend);
4544
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
4545
        }
4546
    }
4547
}
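
/* Illustrative sketch (not built): the IOPM test above expands the access
   size encoded in param bits 6..4 into a contiguous bit mask and checks it
   against the bits covering the accessed port(s). */
#if 0
static int iopm_hit_example(uint16_t iopm_word, uint32_t port, uint32_t size)
{
    uint16_t mask = (1 << size) - 1;              /* e.g. size 2 -> 0b11 */

    /* iopm_word stands for lduw_phys(addr + port / 8) in the code above */
    return (iopm_word & (mask << (port & 7))) != 0;   /* non-zero: intercept */
}
#endif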
4548

    
4549
/* Note: currently only 32 bits of exit_code are used */
4550
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4551
{
4552
    uint32_t int_ctl;
4553

    
4554
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
4555
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
4556
                  exit_code, exit_info_1,
4557
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4558
                                                   control.exit_info_2)),
4559
                  EIP);
4560

    
4561
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
4562
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
4563
                 SVM_INTERRUPT_SHADOW_MASK);
4564
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4565
    } else {
4566
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4567
    }
4568

    
4569
    /* Save the VM state in the vmcb */
4570
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
4571
                 &env->segs[R_ES]);
4572
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4573
                 &env->segs[R_CS]);
4574
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4575
                 &env->segs[R_SS]);
4576
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4577
                 &env->segs[R_DS]);
4578

    
4579
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
4580
             env->gdt.base);
4581
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
4582
             env->gdt.limit);
4583

    
4584
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
4585
             env->idt.base);
4586
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
4587
             env->idt.limit);
4588

    
4589
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4590
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4591
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4592
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4593
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
4594

    
4595
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4596
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
4597
    int_ctl |= env->v_tpr & V_TPR_MASK;
4598
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
4599
        int_ctl |= V_IRQ_MASK;
4600
    }
4601
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
4602

    
4603
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
4604
             compute_eflags());
4605
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4606
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4607
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4608
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4609
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4610
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
4611
             env->hflags & HF_CPL_MASK);
4612

    
4613
    /* Reload the host state from vm_hsave */
4614
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4615
    env->hflags &= ~HF_SVMI_MASK;
4616
    env->intercept = 0;
4617
    env->intercept_exceptions = 0;
4618
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
4619
    env->tsc_offset = 0;
4620

    
4621
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4622
                                                       save.gdtr.base));
4623
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
4624
                                                       save.gdtr.limit));
4625

    
4626
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4627
                                                       save.idtr.base));
4628
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
4629
                                                       save.idtr.limit));
4630

    
4631
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4632
                                                              save.cr0)) |
4633
                       CR0_PE_MASK);
4634
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4635
                                                              save.cr4)));
4636
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4637
                                                              save.cr3)));
4638
    /* we need to set the efer after the crs so the hidden flags get
4639
       set properly */
4640
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4641
                                                         save.efer)));
4642
    env->eflags = 0;
4643
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
4644
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4645
    CC_OP = CC_OP_EFLAGS;
4646

    
4647
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
4648
                       env, R_ES);
4649
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
4650
                       env, R_CS);
4651
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
4652
                       env, R_SS);
4653
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
4654
                       env, R_DS);
4655

    
4656
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4657
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4658
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4659

    
4660
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4661
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
4662

    
4663
    /* other setups */
4664
    cpu_x86_set_cpl(env, 0);
4665
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
4666
             exit_code);
4667
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
4668
             exit_info_1);
4669

    
4670
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
4671
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4672
                                              control.event_inj)));
4673
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
4674
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4675
                                              control.event_inj_err)));
4676
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
4677

    
4678
    env->hflags2 &= ~HF2_GIF_MASK;
4679
    /* FIXME: Resets the current ASID register to zero (host ASID). */
4680

    
4681
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4682

    
4683
    /* Clears the TSC_OFFSET inside the processor. */
4684

    
4685
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
4686
       from the page table indicated the host's CR3. If the PDPEs contain
4687
       illegal state, the processor causes a shutdown. */
4688

    
4689
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4690
    env->cr[0] |= CR0_PE_MASK;
4691
    env->eflags &= ~VM_MASK;
4692

    
4693
    /* Disables all breakpoints in the host DR7 register. */
4694

    
4695
    /* Checks the reloaded host state for consistency. */
4696

    
4697
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4698
       host's code segment or non-canonical (in the case of long mode), a
4699
       #GP fault is delivered inside the host. */
4700

    
4701
    /* remove any pending exception */
4702
    env->exception_index = -1;
4703
    env->error_code = 0;
4704
    env->old_exception = -1;
4705

    
4706
    cpu_loop_exit(env);
4707
}
4708

    
4709
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
4710
{
4711
    env = nenv;
4712
    helper_vmexit(exit_code, exit_info_1);
4713
}
4714

    
4715
#endif
4716

    
4717
#define SHIFT 0
4718
#include "cc_helper_template.h"
4719
#undef SHIFT
4720

    
4721
#define SHIFT 1
4722
#include "cc_helper_template.h"
4723
#undef SHIFT
4724

    
4725
#define SHIFT 2
4726
#include "cc_helper_template.h"
4727
#undef SHIFT
4728

    
4729
#ifdef TARGET_X86_64
4730

    
4731
#define SHIFT 3
4732
#include "cc_helper_template.h"
4733
#undef SHIFT
4734

    
4735
#endif
4736

    
4737
#define SHIFT 0
4738
#include "shift_helper_template.h"
4739
#undef SHIFT
4740

    
4741
#define SHIFT 1
4742
#include "shift_helper_template.h"
4743
#undef SHIFT
4744

    
4745
#define SHIFT 2
4746
#include "shift_helper_template.h"
4747
#undef SHIFT
4748

    
4749
#ifdef TARGET_X86_64
4750
#define SHIFT 3
4751
#include "shift_helper_template.h"
4752
#undef SHIFT
4753
#endif
4754

    
/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
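
/* Illustrative sketch (not built): expected results of the scan loops above
   for a few sample operands (as with the real BSF/BSR instructions, the
   result for t0 == 0 with wordsize == 0 is left undefined). */
#if 0
static void bit_scan_examples(void)
{
    assert(helper_bsf(0x8) == 3);             /* lowest set bit */
    assert(helper_bsr(0x8) == 3);             /* highest set bit */
    assert(helper_lzcnt(0x8, 16) == 12);      /* 16-bit LZCNT of 0x0008 */
    assert(helper_lzcnt(0, 16) == 16);        /* all-zero operand */
}
#endif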
4795

    
4796
static int compute_all_eflags(void)
4797
{
4798
    return CC_SRC;
4799
}
4800

    
4801
static int compute_c_eflags(void)
4802
{
4803
    return CC_SRC & CC_C;
4804
}
4805

    
4806
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_all_eflags();

    case CC_OP_MULB:
        return compute_all_mulb();
    case CC_OP_MULW:
        return compute_all_mulw();
    case CC_OP_MULL:
        return compute_all_mull();

    case CC_OP_ADDB:
        return compute_all_addb();
    case CC_OP_ADDW:
        return compute_all_addw();
    case CC_OP_ADDL:
        return compute_all_addl();

    case CC_OP_ADCB:
        return compute_all_adcb();
    case CC_OP_ADCW:
        return compute_all_adcw();
    case CC_OP_ADCL:
        return compute_all_adcl();

    case CC_OP_SUBB:
        return compute_all_subb();
    case CC_OP_SUBW:
        return compute_all_subw();
    case CC_OP_SUBL:
        return compute_all_subl();

    case CC_OP_SBBB:
        return compute_all_sbbb();
    case CC_OP_SBBW:
        return compute_all_sbbw();
    case CC_OP_SBBL:
        return compute_all_sbbl();

    case CC_OP_LOGICB:
        return compute_all_logicb();
    case CC_OP_LOGICW:
        return compute_all_logicw();
    case CC_OP_LOGICL:
        return compute_all_logicl();

    case CC_OP_INCB:
        return compute_all_incb();
    case CC_OP_INCW:
        return compute_all_incw();
    case CC_OP_INCL:
        return compute_all_incl();

    case CC_OP_DECB:
        return compute_all_decb();
    case CC_OP_DECW:
        return compute_all_decw();
    case CC_OP_DECL:
        return compute_all_decl();

    case CC_OP_SHLB:
        return compute_all_shlb();
    case CC_OP_SHLW:
        return compute_all_shlw();
    case CC_OP_SHLL:
        return compute_all_shll();

    case CC_OP_SARB:
        return compute_all_sarb();
    case CC_OP_SARW:
        return compute_all_sarw();
    case CC_OP_SARL:
        return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq();

    case CC_OP_ADDQ:
        return compute_all_addq();

    case CC_OP_ADCQ:
        return compute_all_adcq();

    case CC_OP_SUBQ:
        return compute_all_subq();

    case CC_OP_SBBQ:
        return compute_all_sbbq();

    case CC_OP_LOGICQ:
        return compute_all_logicq();

    case CC_OP_INCQ:
        return compute_all_incq();

    case CC_OP_DECQ:
        return compute_all_decq();

    case CC_OP_SHLQ:
        return compute_all_shlq();

    case CC_OP_SARQ:
        return compute_all_sarq();
#endif
    }
}

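/* Wrapper for callers that do not have the global env set up: temporarily
   switch it to env1 while the flags are computed. */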
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
{
    CPUX86State *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}

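/* Same dispatch, but only the carry flag is needed.  Several cases share a
   single helper because the computation does not depend on the operand
   width: MUL and SAR use the 32-bit variants for all sizes, and INC/DEC
   return the carry saved in CC_SRC, since those instructions leave CF
   unchanged. */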
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_c_eflags();

    case CC_OP_MULB:
        return compute_c_mull();
    case CC_OP_MULW:
        return compute_c_mull();
    case CC_OP_MULL:
        return compute_c_mull();

    case CC_OP_ADDB:
        return compute_c_addb();
    case CC_OP_ADDW:
        return compute_c_addw();
    case CC_OP_ADDL:
        return compute_c_addl();

    case CC_OP_ADCB:
        return compute_c_adcb();
    case CC_OP_ADCW:
        return compute_c_adcw();
    case CC_OP_ADCL:
        return compute_c_adcl();

    case CC_OP_SUBB:
        return compute_c_subb();
    case CC_OP_SUBW:
        return compute_c_subw();
    case CC_OP_SUBL:
        return compute_c_subl();

    case CC_OP_SBBB:
        return compute_c_sbbb();
    case CC_OP_SBBW:
        return compute_c_sbbw();
    case CC_OP_SBBL:
        return compute_c_sbbl();

    case CC_OP_LOGICB:
        return compute_c_logicb();
    case CC_OP_LOGICW:
        return compute_c_logicw();
    case CC_OP_LOGICL:
        return compute_c_logicl();

    case CC_OP_INCB:
        return compute_c_incl();
    case CC_OP_INCW:
        return compute_c_incl();
    case CC_OP_INCL:
        return compute_c_incl();

    case CC_OP_DECB:
        return compute_c_incl();
    case CC_OP_DECW:
        return compute_c_incl();
    case CC_OP_DECL:
        return compute_c_incl();

    case CC_OP_SHLB:
        return compute_c_shlb();
    case CC_OP_SHLW:
        return compute_c_shlw();
    case CC_OP_SHLL:
        return compute_c_shll();

    case CC_OP_SARB:
        return compute_c_sarl();
    case CC_OP_SARW:
        return compute_c_sarl();
    case CC_OP_SARL:
        return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_c_mull();

    case CC_OP_ADDQ:
        return compute_c_addq();

    case CC_OP_ADCQ:
        return compute_c_adcq();

    case CC_OP_SUBQ:
        return compute_c_subq();

    case CC_OP_SBBQ:
        return compute_c_sbbq();

    case CC_OP_LOGICQ:
        return compute_c_logicq();

    case CC_OP_INCQ:
        return compute_c_incl();

    case CC_OP_DECQ:
        return compute_c_incl();

    case CC_OP_SHLQ:
        return compute_c_shlq();

    case CC_OP_SARQ:
        return compute_c_sarl();
#endif
    }
}