/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "ioport.h"
#include "qemu-log.h"
#include "cpu-defs.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
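
/* A segment descriptor is read as two little-endian 32-bit words (e1 is
   the low word, e2 the high word).  The helpers below pick the fields
   apart: the limit comes from e1[15:0] and e2[19:16] (scaled by 4K when
   DESC_G_MASK is set), the base from e1[31:16], e2[7:0] and e2[31:24],
   and e2 also carries the attribute bits that are cached verbatim. */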

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
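
/* get_ss_esp_from_tss() below fetches the inner-level stack pointer from
   the current TSS: a 32-bit TSS keeps an esp/ss pair for each of rings
   0-2 at offset 4 + 8 * dpl, a 16-bit TSS at offset 2 + 4 * dpl, which
   is what the (dpl * 4 + 2) << shift computation expands to. */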

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is this correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
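
/* For reference, the TSS offsets used by switch_tss() below follow the
   architectural layouts: a 32-bit TSS has CR3 at 0x1c, EIP at 0x20,
   EFLAGS at 0x24, the eight general registers at 0x28-0x44, the six
   segment selectors starting at 0x48 and the LDT selector at 0x60; a
   16-bit TSS has IP at 0x0e, FLAGS at 0x10, the registers starting at
   0x12 and the LDT selector at 0x2a. */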

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
        }
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
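
/* check_io() below implements the TSS I/O permission bitmap check: the
   16-bit word at offset 0x66 of a 32-bit TSS gives the bitmap base, one
   bit per port; two bytes are read because an access of up to four bytes
   may straddle a byte boundary, and every covered bit must be clear for
   the access to be allowed. */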

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
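
/* For reference, the vectors above are #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17): the exceptions that push an error code. */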

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        if ((sp_mask) == 0xffff) {                      \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);   \
        } else if ((sp_mask) == 0xffffffffLL) {         \
            ESP = (uint32_t)(val);                      \
        } else {                                        \
            ESP = (val);                                \
        }                                               \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                    \
    {                                                   \
        sp -= 2;                                        \
        stw_kernel((ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));        \
    }

#define POPW(ssp, sp, sp_mask, val)                     \
    {                                                   \
        val = lduw_kernel((ssp) + (sp & (sp_mask)));    \
        sp += 2;                                        \
    }

#define POPL(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                \
    }
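
/* The PUSHW/PUSHL and POPW/POPL macros above operate on a local copy of
   the stack pointer and only mask it with sp_mask when forming the
   address; the caller commits the final value with SET_ESP() once every
   push has succeeded.  SEG_ADDL() truncates the segment-relative address
   to 32 bits so a 16/32-bit stack cannot wrap into 64-bit space. */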

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                stl_kernel(ssp, error_code);
            } else {
                stw_kernel(ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
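
/* The 64-bit interrupt path below uses 16-byte IDT gate descriptors and
   takes its stack pointers from the 64-bit TSS, which holds RSP0-RSP2 at
   offsets 4, 12 and 20 and IST1-IST7 starting at offset 0x24; this is
   what the 8 * level + 4 computation in get_rsp_from_tss() indexes. */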

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        stq_kernel(sp, (val));                  \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = ldq_kernel(sp);                   \
        sp += 8;                                \
    }

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = get_rsp_from_tss(dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
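
/* SYSCALL/SYSRET below follow the AMD64 convention: bits 47:32 of the
   STAR MSR give the kernel CS selector (SS is that value + 8), bits
   63:48 the selector base used on the return to user mode, LSTAR/CSTAR
   hold the 64-bit and compatibility-mode entry points, and env->fmask
   (SFMASK) selects the RFLAGS bits cleared on entry. */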

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
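
/* In real mode the IDT is the legacy interrupt vector table: four bytes
   per vector, the 16-bit handler offset first and the code segment
   selector at +2, which is how do_interrupt_real() below reads it. */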

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        EIP = next_eip;
    }
}

#else

static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
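
/* Under SVM (hflags & HF_SVMI_MASK), handle_even_inj() above records the
   event being delivered in the VMCB event_inj field so that a host
   #VMEXIT taken during delivery can observe it; do_interrupt_all() drops
   the VALID bit again once the event has been injected. */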

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}

void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUX86State *env1)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}
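
/* helper_rsm() below is the inverse of do_smm_enter(): it reloads the
   CPU state from the SMRAM save area at smbase + 0x8000 using the same
   offsets, and only updates SMBASE itself when bit 17 of the saved SMM
   revision ID advertises SMBASE relocation (the 0x20000 test). */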

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);
1634

    
1635
    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1636
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1637
}
1638

    
1639
#endif /* !CONFIG_USER_ONLY */

void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}

void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}

void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

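/* LTR: the selector must point to an available TSS descriptor in the GDT;
   once loaded, the descriptor is marked busy. */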
void helper_ltr(int selector)
1840
{
1841
    SegmentCache *dt;
1842
    uint32_t e1, e2;
1843
    int index, type, entry_limit;
1844
    target_ulong ptr;
1845

    
1846
    selector &= 0xffff;
1847
    if ((selector & 0xfffc) == 0) {
1848
        /* NULL selector case: invalid TR */
1849
        env->tr.base = 0;
1850
        env->tr.limit = 0;
1851
        env->tr.flags = 0;
1852
    } else {
1853
        if (selector & 0x4) {
1854
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1855
        }
1856
        dt = &env->gdt;
1857
        index = selector & ~7;
1858
#ifdef TARGET_X86_64
1859
        if (env->hflags & HF_LMA_MASK) {
1860
            entry_limit = 15;
1861
        } else
1862
#endif
1863
        {
1864
            entry_limit = 7;
1865
        }
1866
        if ((index + entry_limit) > dt->limit) {
1867
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1868
        }
1869
        ptr = dt->base + index;
1870
        e1 = ldl_kernel(ptr);
1871
        e2 = ldl_kernel(ptr + 4);
1872
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1873
        if ((e2 & DESC_S_MASK) ||
1874
            (type != 1 && type != 9)) {
1875
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1876
        }
1877
        if (!(e2 & DESC_P_MASK)) {
1878
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1879
        }
1880
#ifdef TARGET_X86_64
1881
        if (env->hflags & HF_LMA_MASK) {
1882
            uint32_t e3, e4;
1883

    
1884
            e3 = ldl_kernel(ptr + 8);
1885
            e4 = ldl_kernel(ptr + 12);
1886
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1887
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1888
            }
1889
            load_seg_cache_raw_dt(&env->tr, e1, e2);
1890
            env->tr.base |= (target_ulong)e3 << 32;
1891
        } else
1892
#endif
1893
        {
1894
            load_seg_cache_raw_dt(&env->tr, e1, e2);
1895
        }
1896
        e2 |= DESC_TSS_BUSY_MASK;
1897
        stl_kernel(ptr + 4, e2);
1898
    }
1899
    env->tr.selector = selector;
1900
}
1901

    
1902
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
1903
void helper_load_seg(int seg_reg, int selector)
1904
{
1905
    uint32_t e1, e2;
1906
    int cpl, dpl, rpl;
1907
    SegmentCache *dt;
1908
    int index;
1909
    target_ulong ptr;
1910

    
1911
    selector &= 0xffff;
1912
    cpl = env->hflags & HF_CPL_MASK;
1913
    if ((selector & 0xfffc) == 0) {
1914
        /* null selector case */
1915
        if (seg_reg == R_SS
1916
#ifdef TARGET_X86_64
1917
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1918
#endif
1919
            ) {
1920
            raise_exception_err(env, EXCP0D_GPF, 0);
1921
        }
1922
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1923
    } else {
1924

    
1925
        if (selector & 0x4) {
1926
            dt = &env->ldt;
1927
        } else {
1928
            dt = &env->gdt;
1929
        }
1930
        index = selector & ~7;
1931
        if ((index + 7) > dt->limit) {
1932
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1933
        }
1934
        ptr = dt->base + index;
1935
        e1 = ldl_kernel(ptr);
1936
        e2 = ldl_kernel(ptr + 4);
1937

    
1938
        if (!(e2 & DESC_S_MASK)) {
1939
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1940
        }
1941
        rpl = selector & 3;
1942
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1943
        if (seg_reg == R_SS) {
1944
            /* must be writable segment */
1945
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1946
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1947
            }
1948
            if (rpl != cpl || dpl != cpl) {
1949
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1950
            }
1951
        } else {
1952
            /* must be readable segment */
1953
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1954
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1955
            }
1956

    
1957
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1958
                /* if not conforming code, test rights */
1959
                if (dpl < cpl || dpl < rpl) {
1960
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1961
                }
1962
            }
1963
        }
1964

    
1965
        if (!(e2 & DESC_P_MASK)) {
1966
            if (seg_reg == R_SS) {
1967
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1968
            } else {
1969
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1970
            }
1971
        }
1972

    
1973
        /* set the access bit if not already set */
1974
        if (!(e2 & DESC_A_MASK)) {
1975
            e2 |= DESC_A_MASK;
1976
            stl_kernel(ptr + 4, e2);
1977
        }
1978

    
1979
        cpu_x86_load_seg_cache(env, seg_reg, selector,
1980
                       get_seg_base(e1, e2),
1981
                       get_seg_limit(e1, e2),
1982
                       e2);
1983
#if 0
1984
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1985
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
1986
#endif
1987
    }
1988
}
1989

    
1990
/* protected mode jump */
1991
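/* Handles direct far jumps to code segments as well as jumps through
   task gates, TSS descriptors and call gates. */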
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
1992
                           int next_eip_addend)
1993
{
1994
    int gate_cs, type;
1995
    uint32_t e1, e2, cpl, dpl, rpl, limit;
1996
    target_ulong next_eip;
1997

    
1998
    if ((new_cs & 0xfffc) == 0) {
1999
        raise_exception_err(env, EXCP0D_GPF, 0);
2000
    }
2001
    if (load_segment(&e1, &e2, new_cs) != 0) {
2002
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2003
    }
2004
    cpl = env->hflags & HF_CPL_MASK;
2005
    if (e2 & DESC_S_MASK) {
2006
        if (!(e2 & DESC_CS_MASK)) {
2007
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2008
        }
2009
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2010
        if (e2 & DESC_C_MASK) {
2011
            /* conforming code segment */
2012
            if (dpl > cpl) {
2013
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2014
            }
2015
        } else {
2016
            /* non conforming code segment */
2017
            rpl = new_cs & 3;
2018
            if (rpl > cpl) {
2019
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2020
            }
2021
            if (dpl != cpl) {
2022
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2023
            }
2024
        }
2025
        if (!(e2 & DESC_P_MASK)) {
2026
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2027
        }
2028
        limit = get_seg_limit(e1, e2);
2029
        if (new_eip > limit &&
2030
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
2031
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2032
        }
2033
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2034
                       get_seg_base(e1, e2), limit, e2);
2035
        EIP = new_eip;
2036
    } else {
2037
        /* jump to call or task gate */
2038
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2039
        rpl = new_cs & 3;
2040
        cpl = env->hflags & HF_CPL_MASK;
2041
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2042
        switch (type) {
2043
        case 1: /* 286 TSS */
2044
        case 9: /* 386 TSS */
2045
        case 5: /* task gate */
2046
            if (dpl < cpl || dpl < rpl) {
2047
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2048
            }
2049
            next_eip = env->eip + next_eip_addend;
2050
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2051
            CC_OP = CC_OP_EFLAGS;
2052
            break;
2053
        case 4: /* 286 call gate */
2054
        case 12: /* 386 call gate */
2055
            if ((dpl < cpl) || (dpl < rpl)) {
2056
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2057
            }
2058
            if (!(e2 & DESC_P_MASK)) {
2059
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2060
            }
2061
            gate_cs = e1 >> 16;
2062
            new_eip = (e1 & 0xffff);
2063
            if (type == 12) {
2064
                new_eip |= (e2 & 0xffff0000);
2065
            }
2066
            if (load_segment(&e1, &e2, gate_cs) != 0) {
2067
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2068
            }
2069
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2070
            /* must be code segment */
2071
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2072
                 (DESC_S_MASK | DESC_CS_MASK))) {
2073
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2074
            }
2075
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2076
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
2077
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2078
            }
2079
            if (!(e2 & DESC_P_MASK)) {
2080
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2081
            }
2082
            limit = get_seg_limit(e1, e2);
2083
            if (new_eip > limit) {
2084
                raise_exception_err(env, EXCP0D_GPF, 0);
2085
            }
2086
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2087
                                   get_seg_base(e1, e2), limit, e2);
2088
            EIP = new_eip;
2089
            break;
2090
        default:
2091
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2092
            break;
2093
        }
2094
    }
2095
}
2096

    
2097
/* real mode call */
2098
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2099
                       int shift, int next_eip)
2100
{
2101
    int new_eip;
2102
    uint32_t esp, esp_mask;
2103
    target_ulong ssp;
2104

    
2105
    new_eip = new_eip1;
2106
    esp = ESP;
2107
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2108
    ssp = env->segs[R_SS].base;
2109
    if (shift) {
2110
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2111
        PUSHL(ssp, esp, esp_mask, next_eip);
2112
    } else {
2113
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2114
        PUSHW(ssp, esp, esp_mask, next_eip);
2115
    }
2116

    
2117
    SET_ESP(esp, esp_mask);
2118
    env->eip = new_eip;
2119
    env->segs[R_CS].selector = new_cs;
2120
    env->segs[R_CS].base = (new_cs << 4);
2121
}
2122

    
2123
/* protected mode call */
2124
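/* Far calls to code segments and through call gates.  A call gate to a
   more privileged non-conforming segment switches to the inner stack
   taken from the TSS and copies param_count parameters across. */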
void helper_lcall_protected(int new_cs, target_ulong new_eip,
2125
                            int shift, int next_eip_addend)
2126
{
2127
    int new_stack, i;
2128
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2129
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2130
    uint32_t val, limit, old_sp_mask;
2131
    target_ulong ssp, old_ssp, next_eip;
2132

    
2133
    next_eip = env->eip + next_eip_addend;
2134
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2135
    LOG_PCALL_STATE(env);
2136
    if ((new_cs & 0xfffc) == 0) {
2137
        raise_exception_err(env, EXCP0D_GPF, 0);
2138
    }
2139
    if (load_segment(&e1, &e2, new_cs) != 0) {
2140
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2141
    }
2142
    cpl = env->hflags & HF_CPL_MASK;
2143
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2144
    if (e2 & DESC_S_MASK) {
2145
        if (!(e2 & DESC_CS_MASK)) {
2146
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2147
        }
2148
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2149
        if (e2 & DESC_C_MASK) {
2150
            /* conforming code segment */
2151
            if (dpl > cpl) {
2152
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2153
            }
2154
        } else {
2155
            /* non conforming code segment */
2156
            rpl = new_cs & 3;
2157
            if (rpl > cpl) {
2158
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2159
            }
2160
            if (dpl != cpl) {
2161
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2162
            }
2163
        }
2164
        if (!(e2 & DESC_P_MASK)) {
2165
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2166
        }
2167

    
2168
#ifdef TARGET_X86_64
2169
        /* XXX: check 16/32 bit cases in long mode */
2170
        if (shift == 2) {
2171
            target_ulong rsp;
2172

    
2173
            /* 64 bit case */
2174
            rsp = ESP;
2175
            PUSHQ(rsp, env->segs[R_CS].selector);
2176
            PUSHQ(rsp, next_eip);
2177
            /* from this point, not restartable */
2178
            ESP = rsp;
2179
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2180
                                   get_seg_base(e1, e2),
2181
                                   get_seg_limit(e1, e2), e2);
2182
            EIP = new_eip;
2183
        } else
2184
#endif
2185
        {
2186
            sp = ESP;
2187
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2188
            ssp = env->segs[R_SS].base;
2189
            if (shift) {
2190
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2191
                PUSHL(ssp, sp, sp_mask, next_eip);
2192
            } else {
2193
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2194
                PUSHW(ssp, sp, sp_mask, next_eip);
2195
            }
2196

    
2197
            limit = get_seg_limit(e1, e2);
2198
            if (new_eip > limit) {
2199
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2200
            }
2201
            /* from this point, not restartable */
2202
            SET_ESP(sp, sp_mask);
2203
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2204
                                   get_seg_base(e1, e2), limit, e2);
2205
            EIP = new_eip;
2206
        }
2207
    } else {
2208
        /* check gate type */
2209
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2210
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2211
        rpl = new_cs & 3;
2212
        switch (type) {
2213
        case 1: /* available 286 TSS */
2214
        case 9: /* available 386 TSS */
2215
        case 5: /* task gate */
2216
            if (dpl < cpl || dpl < rpl) {
2217
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2218
            }
2219
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2220
            CC_OP = CC_OP_EFLAGS;
2221
            return;
2222
        case 4: /* 286 call gate */
2223
        case 12: /* 386 call gate */
2224
            break;
2225
        default:
2226
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2227
            break;
2228
        }
2229
        shift = type >> 3;
2230

    
2231
        if (dpl < cpl || dpl < rpl) {
2232
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2233
        }
2234
        /* check valid bit */
2235
        if (!(e2 & DESC_P_MASK)) {
2236
            raise_exception_err(env, EXCP0B_NOSEG,  new_cs & 0xfffc);
2237
        }
2238
        selector = e1 >> 16;
2239
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2240
        param_count = e2 & 0x1f;
2241
        if ((selector & 0xfffc) == 0) {
2242
            raise_exception_err(env, EXCP0D_GPF, 0);
2243
        }
2244

    
2245
        if (load_segment(&e1, &e2, selector) != 0) {
2246
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2247
        }
2248
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
2249
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2250
        }
2251
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2252
        if (dpl > cpl) {
2253
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2254
        }
2255
        if (!(e2 & DESC_P_MASK)) {
2256
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2257
        }
2258

    
2259
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2260
            /* to inner privilege */
2261
            get_ss_esp_from_tss(&ss, &sp, dpl);
2262
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
2263
                      "\n",
2264
                      ss, sp, param_count, ESP);
2265
            if ((ss & 0xfffc) == 0) {
2266
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2267
            }
2268
            if ((ss & 3) != dpl) {
2269
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2270
            }
2271
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
2272
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2273
            }
2274
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2275
            if (ss_dpl != dpl) {
2276
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2277
            }
2278
            if (!(ss_e2 & DESC_S_MASK) ||
2279
                (ss_e2 & DESC_CS_MASK) ||
2280
                !(ss_e2 & DESC_W_MASK)) {
2281
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2282
            }
2283
            if (!(ss_e2 & DESC_P_MASK)) {
2284
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2285
            }
2286

    
2287
            /* push_size = ((param_count * 2) + 8) << shift; */
2288

    
2289
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2290
            old_ssp = env->segs[R_SS].base;
2291

    
2292
            sp_mask = get_sp_mask(ss_e2);
2293
            ssp = get_seg_base(ss_e1, ss_e2);
2294
            if (shift) {
2295
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2296
                PUSHL(ssp, sp, sp_mask, ESP);
2297
                for (i = param_count - 1; i >= 0; i--) {
2298
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2299
                    PUSHL(ssp, sp, sp_mask, val);
2300
                }
2301
            } else {
2302
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2303
                PUSHW(ssp, sp, sp_mask, ESP);
2304
                for (i = param_count - 1; i >= 0; i--) {
2305
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2306
                    PUSHW(ssp, sp, sp_mask, val);
2307
                }
2308
            }
2309
            new_stack = 1;
2310
        } else {
2311
            /* to same privilege */
2312
            sp = ESP;
2313
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2314
            ssp = env->segs[R_SS].base;
2315
            /* push_size = (4 << shift); */
2316
            new_stack = 0;
2317
        }
2318

    
2319
        if (shift) {
2320
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2321
            PUSHL(ssp, sp, sp_mask, next_eip);
2322
        } else {
2323
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2324
            PUSHW(ssp, sp, sp_mask, next_eip);
2325
        }
2326

    
2327
        /* from this point, not restartable */
2328

    
2329
        if (new_stack) {
2330
            ss = (ss & ~3) | dpl;
2331
            cpu_x86_load_seg_cache(env, R_SS, ss,
2332
                                   ssp,
2333
                                   get_seg_limit(ss_e1, ss_e2),
2334
                                   ss_e2);
2335
        }
2336

    
2337
        selector = (selector & ~3) | dpl;
2338
        cpu_x86_load_seg_cache(env, R_CS, selector,
2339
                       get_seg_base(e1, e2),
2340
                       get_seg_limit(e1, e2),
2341
                       e2);
2342
        cpu_x86_set_cpl(env, dpl);
2343
        SET_ESP(sp, sp_mask);
2344
        EIP = offset;
2345
    }
2346
}

/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
2410

    
2411
/* protected mode iret */
2412
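/* Common code for LRET and IRET: 'shift' selects the operand size
   (0 = 16 bit, 1 = 32 bit, 2 = 64 bit) and 'addend' is the extra byte
   count to release from the stack (the immediate of "lret imm16"). */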
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2413
{
2414
    uint32_t new_cs, new_eflags, new_ss;
2415
    uint32_t new_es, new_ds, new_fs, new_gs;
2416
    uint32_t e1, e2, ss_e1, ss_e2;
2417
    int cpl, dpl, rpl, eflags_mask, iopl;
2418
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2419

    
2420
#ifdef TARGET_X86_64
2421
    if (shift == 2) {
2422
        sp_mask = -1;
2423
    } else
2424
#endif
2425
    {
2426
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2427
    }
2428
    sp = ESP;
2429
    ssp = env->segs[R_SS].base;
2430
    new_eflags = 0; /* avoid warning */
2431
#ifdef TARGET_X86_64
2432
    if (shift == 2) {
2433
        POPQ(sp, new_eip);
2434
        POPQ(sp, new_cs);
2435
        new_cs &= 0xffff;
2436
        if (is_iret) {
2437
            POPQ(sp, new_eflags);
2438
        }
2439
    } else
2440
#endif
2441
    {
2442
        if (shift == 1) {
2443
            /* 32 bits */
2444
            POPL(ssp, sp, sp_mask, new_eip);
2445
            POPL(ssp, sp, sp_mask, new_cs);
2446
            new_cs &= 0xffff;
2447
            if (is_iret) {
2448
                POPL(ssp, sp, sp_mask, new_eflags);
2449
                if (new_eflags & VM_MASK) {
2450
                    goto return_to_vm86;
2451
                }
2452
            }
2453
        } else {
2454
            /* 16 bits */
2455
            POPW(ssp, sp, sp_mask, new_eip);
2456
            POPW(ssp, sp, sp_mask, new_cs);
2457
            if (is_iret) {
2458
                POPW(ssp, sp, sp_mask, new_eflags);
2459
            }
2460
        }
2461
    }
2462
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2463
              new_cs, new_eip, shift, addend);
2464
    LOG_PCALL_STATE(env);
2465
    if ((new_cs & 0xfffc) == 0) {
2466
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2467
    }
2468
    if (load_segment(&e1, &e2, new_cs) != 0) {
2469
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2470
    }
2471
    if (!(e2 & DESC_S_MASK) ||
2472
        !(e2 & DESC_CS_MASK)) {
2473
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2474
    }
2475
    cpl = env->hflags & HF_CPL_MASK;
2476
    rpl = new_cs & 3;
2477
    if (rpl < cpl) {
2478
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2479
    }
2480
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2481
    if (e2 & DESC_C_MASK) {
2482
        if (dpl > rpl) {
2483
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2484
        }
2485
    } else {
2486
        if (dpl != rpl) {
2487
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2488
        }
2489
    }
2490
    if (!(e2 & DESC_P_MASK)) {
2491
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2492
    }
2493

    
2494
    sp += addend;
2495
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2496
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2497
        /* return to same privilege level */
2498
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2499
                       get_seg_base(e1, e2),
2500
                       get_seg_limit(e1, e2),
2501
                       e2);
2502
    } else {
2503
        /* return to different privilege level */
2504
#ifdef TARGET_X86_64
2505
        if (shift == 2) {
2506
            POPQ(sp, new_esp);
2507
            POPQ(sp, new_ss);
2508
            new_ss &= 0xffff;
2509
        } else
2510
#endif
2511
        {
2512
            if (shift == 1) {
2513
                /* 32 bits */
2514
                POPL(ssp, sp, sp_mask, new_esp);
2515
                POPL(ssp, sp, sp_mask, new_ss);
2516
                new_ss &= 0xffff;
2517
            } else {
2518
                /* 16 bits */
2519
                POPW(ssp, sp, sp_mask, new_esp);
2520
                POPW(ssp, sp, sp_mask, new_ss);
2521
            }
2522
        }
2523
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2524
                  new_ss, new_esp);
2525
        if ((new_ss & 0xfffc) == 0) {
2526
#ifdef TARGET_X86_64
2527
            /* NULL ss is allowed in long mode if cpl != 3 */
2528
            /* XXX: test CS64? */
2529
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2530
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2531
                                       0, 0xffffffff,
2532
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2533
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2534
                                       DESC_W_MASK | DESC_A_MASK);
2535
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2536
            } else
2537
#endif
2538
            {
2539
                raise_exception_err(env, EXCP0D_GPF, 0);
2540
            }
2541
        } else {
2542
            if ((new_ss & 3) != rpl) {
2543
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2544
            }
2545
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
2546
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2547
            }
2548
            if (!(ss_e2 & DESC_S_MASK) ||
2549
                (ss_e2 & DESC_CS_MASK) ||
2550
                !(ss_e2 & DESC_W_MASK)) {
2551
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2552
            }
2553
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2554
            if (dpl != rpl) {
2555
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2556
            }
2557
            if (!(ss_e2 & DESC_P_MASK)) {
2558
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2559
            }
2560
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2561
                                   get_seg_base(ss_e1, ss_e2),
2562
                                   get_seg_limit(ss_e1, ss_e2),
2563
                                   ss_e2);
2564
        }
2565

    
2566
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2567
                       get_seg_base(e1, e2),
2568
                       get_seg_limit(e1, e2),
2569
                       e2);
2570
        cpu_x86_set_cpl(env, rpl);
2571
        sp = new_esp;
2572
#ifdef TARGET_X86_64
2573
        if (env->hflags & HF_CS64_MASK) {
2574
            sp_mask = -1;
2575
        } else
2576
#endif
2577
        {
2578
            sp_mask = get_sp_mask(ss_e2);
2579
        }
2580

    
2581
        /* validate data segments */
2582
        validate_seg(R_ES, rpl);
2583
        validate_seg(R_DS, rpl);
2584
        validate_seg(R_FS, rpl);
2585
        validate_seg(R_GS, rpl);
2586

    
2587
        sp += addend;
2588
    }
2589
    SET_ESP(sp, sp_mask);
2590
    env->eip = new_eip;
2591
    if (is_iret) {
2592
        /* NOTE: 'cpl' is the _old_ CPL */
2593
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2594
        if (cpl == 0) {
2595
            eflags_mask |= IOPL_MASK;
2596
        }
2597
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2598
        if (cpl <= iopl) {
2599
            eflags_mask |= IF_MASK;
2600
        }
2601
        if (shift == 0) {
2602
            eflags_mask &= 0xffff;
2603
        }
2604
        cpu_load_eflags(env, new_eflags, eflags_mask);
2605
    }
2606
    return;
2607

    
2608
 return_to_vm86:
2609
    POPL(ssp, sp, sp_mask, new_esp);
2610
    POPL(ssp, sp, sp_mask, new_ss);
2611
    POPL(ssp, sp, sp_mask, new_es);
2612
    POPL(ssp, sp, sp_mask, new_ds);
2613
    POPL(ssp, sp, sp_mask, new_fs);
2614
    POPL(ssp, sp, sp_mask, new_gs);
2615

    
2616
    /* modify processor state */
2617
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2618
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2619
                    VIP_MASK);
2620
    load_seg_vm(R_CS, new_cs & 0xffff);
2621
    cpu_x86_set_cpl(env, 3);
2622
    load_seg_vm(R_SS, new_ss & 0xffff);
2623
    load_seg_vm(R_ES, new_es & 0xffff);
2624
    load_seg_vm(R_DS, new_ds & 0xffff);
2625
    load_seg_vm(R_FS, new_fs & 0xffff);
2626
    load_seg_vm(R_GS, new_gs & 0xffff);
2627

    
2628
    env->eip = new_eip & 0xffff;
2629
    ESP = new_esp;
2630
}

void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2667

    
2668
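/* SYSENTER: enter ring 0 through the flat code and stack segments
   derived from MSR_IA32_SYSENTER_CS, jumping to the SYSENTER EIP/ESP
   MSR values. */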
void helper_sysenter(void)
2669
{
2670
    if (env->sysenter_cs == 0) {
2671
        raise_exception_err(env, EXCP0D_GPF, 0);
2672
    }
2673
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2674
    cpu_x86_set_cpl(env, 0);
2675

    
2676
#ifdef TARGET_X86_64
2677
    if (env->hflags & HF_LMA_MASK) {
2678
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2679
                               0, 0xffffffff,
2680
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2681
                               DESC_S_MASK |
2682
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2683
                               DESC_L_MASK);
2684
    } else
2685
#endif
2686
    {
2687
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2688
                               0, 0xffffffff,
2689
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2690
                               DESC_S_MASK |
2691
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2692
    }
2693
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2694
                           0, 0xffffffff,
2695
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2696
                           DESC_S_MASK |
2697
                           DESC_W_MASK | DESC_A_MASK);
2698
    ESP = env->sysenter_esp;
2699
    EIP = env->sysenter_eip;
2700
}
2701

    
2702
void helper_sysexit(int dflag)
2703
{
2704
    int cpl;
2705

    
2706
    cpl = env->hflags & HF_CPL_MASK;
2707
    if (env->sysenter_cs == 0 || cpl != 0) {
2708
        raise_exception_err(env, EXCP0D_GPF, 0);
2709
    }
2710
    cpu_x86_set_cpl(env, 3);
2711
#ifdef TARGET_X86_64
2712
    if (dflag == 2) {
2713
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2714
                               3, 0, 0xffffffff,
2715
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2716
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2717
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2718
                               DESC_L_MASK);
2719
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2720
                               3, 0, 0xffffffff,
2721
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2722
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2723
                               DESC_W_MASK | DESC_A_MASK);
2724
    } else
2725
#endif
2726
    {
2727
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2728
                               3, 0, 0xffffffff,
2729
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2730
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2731
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2732
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2733
                               3, 0, 0xffffffff,
2734
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2735
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2736
                               DESC_W_MASK | DESC_A_MASK);
2737
    }
2738
    ESP = ECX;
2739
    EIP = EDX;
2740
}
2741

    
2742
#if defined(CONFIG_USER_ONLY)
2743
target_ulong helper_read_crN(int reg)
2744
{
2745
    return 0;
2746
}
2747

    
2748
void helper_write_crN(int reg, target_ulong t0)
2749
{
2750
}
2751

    
2752
void helper_movl_drN_T0(int reg, target_ulong t0)
2753
{
2754
}
2755
#else
2756
target_ulong helper_read_crN(int reg)
2757
{
2758
    target_ulong val;
2759

    
2760
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2761
    switch (reg) {
2762
    default:
2763
        val = env->cr[reg];
2764
        break;
2765
    case 8:
2766
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2767
            val = cpu_get_apic_tpr(env->apic_state);
2768
        } else {
2769
            val = env->v_tpr;
2770
        }
2771
        break;
2772
    }
2773
    return val;
2774
}
2775

    
2776
void helper_write_crN(int reg, target_ulong t0)
2777
{
2778
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2779
    switch (reg) {
2780
    case 0:
2781
        cpu_x86_update_cr0(env, t0);
2782
        break;
2783
    case 3:
2784
        cpu_x86_update_cr3(env, t0);
2785
        break;
2786
    case 4:
2787
        cpu_x86_update_cr4(env, t0);
2788
        break;
2789
    case 8:
2790
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2791
            cpu_set_apic_tpr(env->apic_state, t0);
2792
        }
2793
        env->v_tpr = t0 & 0x0f;
2794
        break;
2795
    default:
2796
        env->cr[reg] = t0;
2797
        break;
2798
    }
2799
}
2800

    
2801
void helper_movl_drN_T0(int reg, target_ulong t0)
2802
{
2803
    int i;
2804

    
2805
    if (reg < 4) {
2806
        hw_breakpoint_remove(env, reg);
2807
        env->dr[reg] = t0;
2808
        hw_breakpoint_insert(env, reg);
2809
    } else if (reg == 7) {
2810
        for (i = 0; i < 4; i++) {
2811
            hw_breakpoint_remove(env, i);
2812
        }
2813
        env->dr[7] = t0;
2814
        for (i = 0; i < 4; i++) {
2815
            hw_breakpoint_insert(env, i);
2816
        }
2817
    } else {
2818
        env->dr[reg] = t0;
2819
    }
2820
}
2821
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
2868

    
2869
#if defined(CONFIG_USER_ONLY)
2870
void helper_wrmsr(void)
2871
{
2872
}
2873

    
2874
void helper_rdmsr(void)
2875
{
2876
}
2877
#else
2878
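/* WRMSR: the MSR index is taken from ECX and the 64-bit value from
   EDX:EAX. */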
void helper_wrmsr(void)
2879
{
2880
    uint64_t val;
2881

    
2882
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
2883

    
2884
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2885

    
2886
    switch ((uint32_t)ECX) {
2887
    case MSR_IA32_SYSENTER_CS:
2888
        env->sysenter_cs = val & 0xffff;
2889
        break;
2890
    case MSR_IA32_SYSENTER_ESP:
2891
        env->sysenter_esp = val;
2892
        break;
2893
    case MSR_IA32_SYSENTER_EIP:
2894
        env->sysenter_eip = val;
2895
        break;
2896
    case MSR_IA32_APICBASE:
2897
        cpu_set_apic_base(env->apic_state, val);
2898
        break;
2899
    case MSR_EFER:
2900
        {
2901
            uint64_t update_mask;
2902

    
2903
            update_mask = 0;
2904
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
2905
                update_mask |= MSR_EFER_SCE;
2906
            }
2907
            if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2908
                update_mask |= MSR_EFER_LME;
2909
            }
2910
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
2911
                update_mask |= MSR_EFER_FFXSR;
2912
            }
2913
            if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
2914
                update_mask |= MSR_EFER_NXE;
2915
            }
2916
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
2917
                update_mask |= MSR_EFER_SVME;
2918
            }
2919
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
2920
                update_mask |= MSR_EFER_FFXSR;
2921
            }
2922
            cpu_load_efer(env, (env->efer & ~update_mask) |
2923
                          (val & update_mask));
2924
        }
2925
        break;
2926
    case MSR_STAR:
2927
        env->star = val;
2928
        break;
2929
    case MSR_PAT:
2930
        env->pat = val;
2931
        break;
2932
    case MSR_VM_HSAVE_PA:
2933
        env->vm_hsave = val;
2934
        break;
2935
#ifdef TARGET_X86_64
2936
    case MSR_LSTAR:
2937
        env->lstar = val;
2938
        break;
2939
    case MSR_CSTAR:
2940
        env->cstar = val;
2941
        break;
2942
    case MSR_FMASK:
2943
        env->fmask = val;
2944
        break;
2945
    case MSR_FSBASE:
2946
        env->segs[R_FS].base = val;
2947
        break;
2948
    case MSR_GSBASE:
2949
        env->segs[R_GS].base = val;
2950
        break;
2951
    case MSR_KERNELGSBASE:
2952
        env->kernelgsbase = val;
2953
        break;
2954
#endif
2955
    case MSR_MTRRphysBase(0):
2956
    case MSR_MTRRphysBase(1):
2957
    case MSR_MTRRphysBase(2):
2958
    case MSR_MTRRphysBase(3):
2959
    case MSR_MTRRphysBase(4):
2960
    case MSR_MTRRphysBase(5):
2961
    case MSR_MTRRphysBase(6):
2962
    case MSR_MTRRphysBase(7):
2963
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
2964
        break;
2965
    case MSR_MTRRphysMask(0):
2966
    case MSR_MTRRphysMask(1):
2967
    case MSR_MTRRphysMask(2):
2968
    case MSR_MTRRphysMask(3):
2969
    case MSR_MTRRphysMask(4):
2970
    case MSR_MTRRphysMask(5):
2971
    case MSR_MTRRphysMask(6):
2972
    case MSR_MTRRphysMask(7):
2973
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
2974
        break;
2975
    case MSR_MTRRfix64K_00000:
2976
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
2977
        break;
2978
    case MSR_MTRRfix16K_80000:
2979
    case MSR_MTRRfix16K_A0000:
2980
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
2981
        break;
2982
    case MSR_MTRRfix4K_C0000:
2983
    case MSR_MTRRfix4K_C8000:
2984
    case MSR_MTRRfix4K_D0000:
2985
    case MSR_MTRRfix4K_D8000:
2986
    case MSR_MTRRfix4K_E0000:
2987
    case MSR_MTRRfix4K_E8000:
2988
    case MSR_MTRRfix4K_F0000:
2989
    case MSR_MTRRfix4K_F8000:
2990
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
2991
        break;
2992
    case MSR_MTRRdefType:
2993
        env->mtrr_deftype = val;
2994
        break;
2995
    case MSR_MCG_STATUS:
2996
        env->mcg_status = val;
2997
        break;
2998
    case MSR_MCG_CTL:
2999
        if ((env->mcg_cap & MCG_CTL_P)
3000
            && (val == 0 || val == ~(uint64_t)0)) {
3001
            env->mcg_ctl = val;
3002
        }
3003
        break;
3004
    case MSR_TSC_AUX:
3005
        env->tsc_aux = val;
3006
        break;
3007
    case MSR_IA32_MISC_ENABLE:
3008
        env->msr_ia32_misc_enable = val;
3009
        break;
3010
    default:
3011
        if ((uint32_t)ECX >= MSR_MC0_CTL
3012
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3013
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3014
            if ((offset & 0x3) != 0
3015
                || (val == 0 || val == ~(uint64_t)0)) {
3016
                env->mce_banks[offset] = val;
3017
            }
3018
            break;
3019
        }
3020
        /* XXX: exception? */
3021
        break;
3022
    }
3023
}
3024

    
3025
void helper_rdmsr(void)
3026
{
3027
    uint64_t val;
3028

    
3029
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3030

    
    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
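
/* LSL: return the limit of the descriptor selected by selector1 and set
   ZF (through the lazy CC_SRC flags); on any failure (null selector,
   unreadable descriptor, bad type or privilege check) ZF is cleared and
   0 is returned. */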
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
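
/* LAR: return the access-rights bytes of the selected descriptor, masked
   to 0x00f0ff00, and set ZF; ZF is cleared and 0 returned if the
   descriptor cannot be accessed at the current privilege level. */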
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
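
/* VERR/VERW: set ZF if the segment selected by selector1 is readable
   (VERR) or writable (VERW) at the current CPL and RPL, otherwise clear
   ZF.  No exception is raised. */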
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
#endif

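/* HLT: mark the vCPU halted and leave the CPU loop with EXCP_HLT so the
   main loop stops running this CPU until the next interrupt arrives. */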
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

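/* MONITOR/MWAIT: MONITOR only validates its operands (the monitored
   address is not recorded), and MWAIT degrades to HLT on a single-CPU
   configuration, otherwise to a NOP. */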
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

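/* BOUND: check a signed array index against the two-element bounds pair
   stored at a0 and raise #BR (EXCP05_BOUND) if it lies outside
   [lower, upper]. */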
void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)

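/* Instantiate the softmmu load/store helpers for the four access sizes
   (SHIFT 0..3 selects 8, 16, 32 and 64 bit accesses). */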
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    int ret;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        raise_exception_err(env, env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}

void helper_vmmcall(void)
{
}

void helper_vmload(int aflag)
{
}

void helper_vmsave(int aflag)
{
}

void helper_stgi(void)
{
}

void helper_clgi(void)
{
}

void helper_skinit(void)
{
}

void helper_invlpga(int aflag)
{
}

void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

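/* Copy a segment cache entry to/from a VMCB segment descriptor.  The
   16-bit VMCB attrib field packs descriptor attribute bits 8..15 and
   20..23 of the cached flags into its low 12 bits, which is what the
   shift/mask pairs below convert between. */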
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUX86State *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

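/* VMRUN: save the host state into the hsave area, load the guest state
   and the cached intercept bitmaps from the VMCB whose physical address
   is in rAX, and finally inject a pending event if the VMCB requests
   one. */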
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

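/* VMLOAD: load FS, GS, TR, LDTR and the SYSCALL/SYSENTER MSR state from
   the VMCB whose physical address is in rAX. */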
void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

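/* VMSAVE: the mirror of VMLOAD; store the same registers back into the
   VMCB whose physical address is in rAX. */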
void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

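/* INVLPGA: invalidate the TLB entry for the virtual address in rAX; the
   ASID in ECX is currently ignored and the page is simply flushed. */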
void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

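/* Check the intercept bitmaps that were cached at VMRUN time and raise a
   #VMEXIT if the current operation is intercepted.  For MSR accesses the
   MSR permission map is consulted: two bits per MSR (read/write), laid
   out in three 2K regions covering the 0x0, 0xc0000000 and 0xc0010000
   MSR ranges. */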
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, param);
    env = saved_env;
}

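/* IN/OUT intercept check: consult the I/O permission bitmap (one bit per
   port; multi-byte accesses test the bits of every port they touch) and
   raise SVM_EXIT_IOIO with the port number encoded in exit_info_1. */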
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

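/* #VMEXIT: write the guest state and the exit code/information back into
   the VMCB, then reload the host state that VMRUN stashed in the hsave
   area and resume the host at the saved rIP. */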
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
    env = nenv;
    helper_vmexit(exit_code, exit_info_1);
}

#endif