target-i386/op_helper.c @ 997ff0d9

/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "host-utils.h"
#include "ioport.h"
#include "qemu-log.h"
#include "cpu-defs.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL
//#define DEBUG_MULDIV

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}

static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK);
}

/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void cpu_load_eflags(CPUX86State *env, int eflags,
                                   int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}

/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

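/* RCL/RCR rotate through CF, so a w-bit rotate really acts on w + 1 bits:
   17 positions for 16-bit operands and 9 for 8-bit operands.  The two
   tables below reduce the rotate count modulo 17 and modulo 9. */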
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    cpu_load_eflags(env, t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
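/* In the two helpers above, e1/e2 are the low and high 32-bit words of a
   segment descriptor: e1 holds limit 15:0 and base 15:0, e2 holds base
   23:16, the type/S/DPL/P byte, limit 19:16, the AVL/L/D-B/G bits and
   base 31:24.  With DESC_G_MASK set the limit is in 4 KiB units, hence
   the << 12 | 0xfff. */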

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
     http://support.amd.com/us/Processor_TechDocs/24593.pdf
     chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

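/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17).  All other vectors push no error code. */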
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        if ((sp_mask) == 0xffff) {                      \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);   \
        } else if ((sp_mask) == 0xffffffffLL) {         \
            ESP = (uint32_t)(val);                      \
        } else {                                        \
            ESP = (val);                                \
        }                                               \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                           \
    do {                                                \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                    \
    {                                                   \
        sp -= 2;                                        \
        stw_kernel((ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));        \
    }

#define POPW(ssp, sp, sp_mask, val)                     \
    {                                                   \
        val = lduw_kernel((ssp) + (sp & (sp_mask)));    \
        sp += 2;                                        \
    }

#define POPL(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                \
    }

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                stl_kernel(ssp, error_code);
            } else {
                stw_kernel(ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        stq_kernel(sp, (val));                  \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = ldq_kernel(sp);                   \
        sp += 8;                                \
    }

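/* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20, followed by a
   reserved quadword and then IST1-IST7 starting at offset 36, so index
   8 * level + 4 selects RSPn for level 0-2 and ISTn for level == ist + 3. */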
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = get_rsp_from_tss(dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int) {
        EIP = next_eip;
    }
}

#else

static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}

void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUX86State *env1)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}
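/* Return from SMM: reload the state saved by do_smm_enter from the
   SMRAM state save area and leave SMM.  SMBASE is only relocated when
   bit 17 of the state save revision ID is set. */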
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
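/* The DIV/IDIV helpers below raise #DE both for a zero divisor and for
   a quotient that does not fit in the destination; the byte forms return
   the quotient in AL and the remainder in AH, the wider forms use
   AX/DX and EAX/EDX respectively. */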
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(env, EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q) {
        raise_exception(env, EXCP00_DIVZ);
    }
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* bcd */

/* XXX: exception */
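/* AAM splits AL by the immediate base (normally 10): AH = AL / base and
   AL = AL % base, e.g. AL = 0x35 (53) becomes AH = 5, AL = 3.  AAD does
   the inverse: AL = (AH * base + AL) & 0xff with AH cleared.  Both store
   the new AL in CC_DST for the lazy flag computation. */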
void helper_aam(int base)
1867
{
1868
    int al, ah;
1869

    
1870
    al = EAX & 0xff;
1871
    ah = al / base;
1872
    al = al % base;
1873
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1874
    CC_DST = al;
1875
}
1876

    
1877
void helper_aad(int base)
1878
{
1879
    int al, ah;
1880

    
1881
    al = EAX & 0xff;
1882
    ah = (EAX >> 8) & 0xff;
1883
    al = ((ah * base) + al) & 0xff;
1884
    EAX = (EAX & ~0xffff) | al;
1885
    CC_DST = al;
1886
}
1887

    
1888
void helper_aaa(void)
1889
{
1890
    int icarry;
1891
    int al, ah, af;
1892
    int eflags;
1893

    
1894
    eflags = helper_cc_compute_all(CC_OP);
1895
    af = eflags & CC_A;
1896
    al = EAX & 0xff;
1897
    ah = (EAX >> 8) & 0xff;
1898

    
1899
    icarry = (al > 0xf9);
1900
    if (((al & 0x0f) > 9) || af) {
1901
        al = (al + 6) & 0x0f;
1902
        ah = (ah + 1 + icarry) & 0xff;
1903
        eflags |= CC_C | CC_A;
1904
    } else {
1905
        eflags &= ~(CC_C | CC_A);
1906
        al &= 0x0f;
1907
    }
1908
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1909
    CC_SRC = eflags;
1910
}
1911

    
1912
void helper_aas(void)
1913
{
1914
    int icarry;
1915
    int al, ah, af;
1916
    int eflags;
1917

    
1918
    eflags = helper_cc_compute_all(CC_OP);
1919
    af = eflags & CC_A;
1920
    al = EAX & 0xff;
1921
    ah = (EAX >> 8) & 0xff;
1922

    
1923
    icarry = (al < 6);
1924
    if (((al & 0x0f) > 9) || af) {
1925
        al = (al - 6) & 0x0f;
1926
        ah = (ah - 1 - icarry) & 0xff;
1927
        eflags |= CC_C | CC_A;
1928
    } else {
1929
        eflags &= ~(CC_C | CC_A);
1930
        al &= 0x0f;
1931
    }
1932
    EAX = (EAX & ~0xffff) | al | (ah << 8);
1933
    CC_SRC = eflags;
1934
}
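/* DAA/DAS adjust AL after a packed BCD addition/subtraction: a low-nibble
   carry (AF set or AL & 0x0f > 9) adds/subtracts 6, a high carry (CF set
   or AL > 0x99) adds/subtracts 0x60, and ZF/PF/SF are recomputed by hand
   from the new AL. */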
void helper_daa(void)
1937
{
1938
    int old_al, al, af, cf;
1939
    int eflags;
1940

    
1941
    eflags = helper_cc_compute_all(CC_OP);
1942
    cf = eflags & CC_C;
1943
    af = eflags & CC_A;
1944
    old_al = al = EAX & 0xff;
1945

    
1946
    eflags = 0;
1947
    if (((al & 0x0f) > 9) || af) {
1948
        al = (al + 6) & 0xff;
1949
        eflags |= CC_A;
1950
    }
1951
    if ((old_al > 0x99) || cf) {
1952
        al = (al + 0x60) & 0xff;
1953
        eflags |= CC_C;
1954
    }
1955
    EAX = (EAX & ~0xff) | al;
1956
    /* well, speed is not an issue here, so we compute the flags by hand */
1957
    eflags |= (al == 0) << 6; /* zf */
1958
    eflags |= parity_table[al]; /* pf */
1959
    eflags |= (al & 0x80); /* sf */
1960
    CC_SRC = eflags;
1961
}
1962

    
1963
void helper_das(void)
1964
{
1965
    int al, al1, af, cf;
1966
    int eflags;
1967

    
1968
    eflags = helper_cc_compute_all(CC_OP);
1969
    cf = eflags & CC_C;
1970
    af = eflags & CC_A;
1971
    al = EAX & 0xff;
1972

    
1973
    eflags = 0;
1974
    al1 = al;
1975
    if (((al & 0x0f) > 9) || af) {
1976
        eflags |= CC_A;
1977
        if (al < 6 || cf) {
1978
            eflags |= CC_C;
1979
        }
1980
        al = (al - 6) & 0xff;
1981
    }
1982
    if ((al1 > 0x99) || cf) {
1983
        al = (al - 0x60) & 0xff;
1984
        eflags |= CC_C;
1985
    }
1986
    EAX = (EAX & ~0xff) | al;
1987
    /* well, speed is not an issue here, so we compute the flags by hand */
1988
    eflags |= (al == 0) << 6; /* zf */
1989
    eflags |= parity_table[al]; /* pf */
1990
    eflags |= (al & 0x80); /* sf */
1991
    CC_SRC = eflags;
1992
}
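/* INTO: raise the overflow exception (vector 4) when OF is set, with the
   return address pointing past the INTO instruction. */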
void helper_into(int next_eip_addend)
1995
{
1996
    int eflags;
1997

    
1998
    eflags = helper_cc_compute_all(CC_OP);
1999
    if (eflags & CC_O) {
2000
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
2001
    }
2002
}
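/* CMPXCHG8B/CMPXCHG16B: compare EDX:EAX (RDX:RAX) with the memory operand;
   on a match store ECX:EBX (RCX:RBX) and set ZF, otherwise load the old
   value into EDX:EAX and clear ZF.  The store is issued even on mismatch
   to keep the write semantics of the locked read-modify-write, and the
   16-byte form additionally requires a 16-byte aligned operand (#GP). */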
void helper_cmpxchg8b(target_ulong a0)
2005
{
2006
    uint64_t d;
2007
    int eflags;
2008

    
2009
    eflags = helper_cc_compute_all(CC_OP);
2010
    d = ldq(a0);
2011
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2012
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2013
        eflags |= CC_Z;
2014
    } else {
2015
        /* always do the store */
2016
        stq(a0, d);
2017
        EDX = (uint32_t)(d >> 32);
2018
        EAX = (uint32_t)d;
2019
        eflags &= ~CC_Z;
2020
    }
2021
    CC_SRC = eflags;
2022
}
2023

    
2024
#ifdef TARGET_X86_64
2025
void helper_cmpxchg16b(target_ulong a0)
2026
{
2027
    uint64_t d0, d1;
2028
    int eflags;
2029

    
2030
    if ((a0 & 0xf) != 0) {
2031
        raise_exception(env, EXCP0D_GPF);
2032
    }
2033
    eflags = helper_cc_compute_all(CC_OP);
2034
    d0 = ldq(a0);
2035
    d1 = ldq(a0 + 8);
2036
    if (d0 == EAX && d1 == EDX) {
2037
        stq(a0, EBX);
2038
        stq(a0 + 8, ECX);
2039
        eflags |= CC_Z;
2040
    } else {
2041
        /* always do the store */
2042
        stq(a0, d0);
2043
        stq(a0 + 8, d1);
2044
        EDX = d1;
2045
        EAX = d0;
2046
        eflags &= ~CC_Z;
2047
    }
2048
    CC_SRC = eflags;
2049
}
2050
#endif
2051

    
2052
void helper_single_step(void)
2053
{
2054
#ifndef CONFIG_USER_ONLY
2055
    check_hw_breakpoints(env, 1);
2056
    env->dr[6] |= DR6_BS;
2057
#endif
2058
    raise_exception(env, EXCP01_DB);
2059
}
2060

    
2061
void helper_cpuid(void)
2062
{
2063
    uint32_t eax, ebx, ecx, edx;
2064

    
2065
    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2066

    
2067
    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2068
    EAX = eax;
2069
    EBX = ebx;
2070
    ECX = ecx;
2071
    EDX = edx;
2072
}
2073

    
2074
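/* ENTER with a non-zero nesting level: copy the previous frame pointers
   from the old frame onto the new stack and finally push the new frame
   pointer value (t1), using 32-bit or 16-bit slots depending on the
   operand size. */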
void helper_enter_level(int level, int data32, target_ulong t1)
2075
{
2076
    target_ulong ssp;
2077
    uint32_t esp_mask, esp, ebp;
2078

    
2079
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2080
    ssp = env->segs[R_SS].base;
2081
    ebp = EBP;
2082
    esp = ESP;
2083
    if (data32) {
2084
        /* 32 bit */
2085
        esp -= 4;
2086
        while (--level) {
2087
            esp -= 4;
2088
            ebp -= 4;
2089
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2090
        }
2091
        esp -= 4;
2092
        stl(ssp + (esp & esp_mask), t1);
2093
    } else {
2094
        /* 16 bit */
2095
        esp -= 2;
2096
        while (--level) {
2097
            esp -= 2;
2098
            ebp -= 2;
2099
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2100
        }
2101
        esp -= 2;
2102
        stw(ssp + (esp & esp_mask), t1);
2103
    }
2104
}
2105

    
2106
#ifdef TARGET_X86_64
2107
void helper_enter64_level(int level, int data64, target_ulong t1)
2108
{
2109
    target_ulong esp, ebp;
2110

    
2111
    ebp = EBP;
2112
    esp = ESP;
2113

    
2114
    if (data64) {
2115
        /* 64 bit */
2116
        esp -= 8;
2117
        while (--level) {
2118
            esp -= 8;
2119
            ebp -= 8;
2120
            stq(esp, ldq(ebp));
2121
        }
2122
        esp -= 8;
2123
        stq(esp, t1);
2124
    } else {
2125
        /* 16 bit */
2126
        esp -= 2;
2127
        while (--level) {
2128
            esp -= 2;
2129
            ebp -= 2;
2130
            stw(esp, lduw(ebp));
2131
        }
2132
        esp -= 2;
2133
        stw(esp, t1);
2134
    }
2135
}
2136
#endif
2137

    
2138
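/* LLDT: load the LDT register from a GDT descriptor.  A null selector
   just invalidates the LDT; otherwise the descriptor must be an LDT
   system descriptor (type 2) and present, and in long mode the 16-byte
   descriptor supplies the upper 32 bits of the base. */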
void helper_lldt(int selector)
2139
{
2140
    SegmentCache *dt;
2141
    uint32_t e1, e2;
2142
    int index, entry_limit;
2143
    target_ulong ptr;
2144

    
2145
    selector &= 0xffff;
2146
    if ((selector & 0xfffc) == 0) {
2147
        /* XXX: NULL selector case: invalid LDT */
2148
        env->ldt.base = 0;
2149
        env->ldt.limit = 0;
2150
    } else {
2151
        if (selector & 0x4) {
2152
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2153
        }
2154
        dt = &env->gdt;
2155
        index = selector & ~7;
2156
#ifdef TARGET_X86_64
2157
        if (env->hflags & HF_LMA_MASK) {
2158
            entry_limit = 15;
2159
        } else
2160
#endif
2161
        {
2162
            entry_limit = 7;
2163
        }
2164
        if ((index + entry_limit) > dt->limit) {
2165
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2166
        }
2167
        ptr = dt->base + index;
2168
        e1 = ldl_kernel(ptr);
2169
        e2 = ldl_kernel(ptr + 4);
2170
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
2171
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2172
        }
2173
        if (!(e2 & DESC_P_MASK)) {
2174
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2175
        }
2176
#ifdef TARGET_X86_64
2177
        if (env->hflags & HF_LMA_MASK) {
2178
            uint32_t e3;
2179

    
2180
            e3 = ldl_kernel(ptr + 8);
2181
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2182
            env->ldt.base |= (target_ulong)e3 << 32;
2183
        } else
2184
#endif
2185
        {
2186
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
2187
        }
2188
    }
2189
    env->ldt.selector = selector;
2190
}
2191

    
2192
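/* LTR: load the task register.  The selector must reference an available
   TSS descriptor (type 1 or 9) in the GDT; the descriptor is then marked
   busy.  In long mode the upper half of the 16-byte descriptor must have
   a zero type field and supplies the upper 32 bits of the base. */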
void helper_ltr(int selector)
2193
{
2194
    SegmentCache *dt;
2195
    uint32_t e1, e2;
2196
    int index, type, entry_limit;
2197
    target_ulong ptr;
2198

    
2199
    selector &= 0xffff;
2200
    if ((selector & 0xfffc) == 0) {
2201
        /* NULL selector case: invalid TR */
2202
        env->tr.base = 0;
2203
        env->tr.limit = 0;
2204
        env->tr.flags = 0;
2205
    } else {
2206
        if (selector & 0x4) {
2207
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2208
        }
2209
        dt = &env->gdt;
2210
        index = selector & ~7;
2211
#ifdef TARGET_X86_64
2212
        if (env->hflags & HF_LMA_MASK) {
2213
            entry_limit = 15;
2214
        } else
2215
#endif
2216
        {
2217
            entry_limit = 7;
2218
        }
2219
        if ((index + entry_limit) > dt->limit) {
2220
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2221
        }
2222
        ptr = dt->base + index;
2223
        e1 = ldl_kernel(ptr);
2224
        e2 = ldl_kernel(ptr + 4);
2225
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2226
        if ((e2 & DESC_S_MASK) ||
2227
            (type != 1 && type != 9)) {
2228
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2229
        }
2230
        if (!(e2 & DESC_P_MASK)) {
2231
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2232
        }
2233
#ifdef TARGET_X86_64
2234
        if (env->hflags & HF_LMA_MASK) {
2235
            uint32_t e3, e4;
2236

    
2237
            e3 = ldl_kernel(ptr + 8);
2238
            e4 = ldl_kernel(ptr + 12);
2239
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
2240
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2241
            }
2242
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2243
            env->tr.base |= (target_ulong)e3 << 32;
2244
        } else
2245
#endif
2246
        {
2247
            load_seg_cache_raw_dt(&env->tr, e1, e2);
2248
        }
2249
        e2 |= DESC_TSS_BUSY_MASK;
2250
        stl_kernel(ptr + 4, e2);
2251
    }
2252
    env->tr.selector = selector;
2253
}
2254

    
2255
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2256
void helper_load_seg(int seg_reg, int selector)
2257
{
2258
    uint32_t e1, e2;
2259
    int cpl, dpl, rpl;
2260
    SegmentCache *dt;
2261
    int index;
2262
    target_ulong ptr;
2263

    
2264
    selector &= 0xffff;
2265
    cpl = env->hflags & HF_CPL_MASK;
2266
    if ((selector & 0xfffc) == 0) {
2267
        /* null selector case */
2268
        if (seg_reg == R_SS
2269
#ifdef TARGET_X86_64
2270
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2271
#endif
2272
            ) {
2273
            raise_exception_err(env, EXCP0D_GPF, 0);
2274
        }
2275
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2276
    } else {
2277

    
2278
        if (selector & 0x4) {
2279
            dt = &env->ldt;
2280
        } else {
2281
            dt = &env->gdt;
2282
        }
2283
        index = selector & ~7;
2284
        if ((index + 7) > dt->limit) {
2285
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2286
        }
2287
        ptr = dt->base + index;
2288
        e1 = ldl_kernel(ptr);
2289
        e2 = ldl_kernel(ptr + 4);
2290

    
2291
        if (!(e2 & DESC_S_MASK)) {
2292
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2293
        }
2294
        rpl = selector & 3;
2295
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2296
        if (seg_reg == R_SS) {
2297
            /* must be writable segment */
2298
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
2299
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2300
            }
2301
            if (rpl != cpl || dpl != cpl) {
2302
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2303
            }
2304
        } else {
2305
            /* must be readable segment */
2306
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
2307
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2308
            }
2309

    
2310
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2311
                /* if not conforming code, test rights */
2312
                if (dpl < cpl || dpl < rpl) {
2313
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2314
                }
2315
            }
2316
        }
2317

    
2318
        if (!(e2 & DESC_P_MASK)) {
2319
            if (seg_reg == R_SS) {
2320
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
2321
            } else {
2322
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2323
            }
2324
        }
2325

    
2326
        /* set the access bit if not already set */
2327
        if (!(e2 & DESC_A_MASK)) {
2328
            e2 |= DESC_A_MASK;
2329
            stl_kernel(ptr + 4, e2);
2330
        }
2331

    
2332
        cpu_x86_load_seg_cache(env, seg_reg, selector,
2333
                       get_seg_base(e1, e2),
2334
                       get_seg_limit(e1, e2),
2335
                       e2);
2336
#if 0
2337
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2338
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
2339
#endif
2340
    }
2341
}
2342

    
2343
/* protected mode jump */
2344
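/* Far JMP in protected mode: either jump directly to a code segment
   (conforming: DPL <= CPL; non conforming: RPL <= CPL and DPL == CPL),
   or go through a task gate/TSS (task switch) or a call gate, in which
   case the target CS:EIP is taken from the gate descriptor. */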
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2345
                           int next_eip_addend)
2346
{
2347
    int gate_cs, type;
2348
    uint32_t e1, e2, cpl, dpl, rpl, limit;
2349
    target_ulong next_eip;
2350

    
2351
    if ((new_cs & 0xfffc) == 0) {
2352
        raise_exception_err(env, EXCP0D_GPF, 0);
2353
    }
2354
    if (load_segment(&e1, &e2, new_cs) != 0) {
2355
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2356
    }
2357
    cpl = env->hflags & HF_CPL_MASK;
2358
    if (e2 & DESC_S_MASK) {
2359
        if (!(e2 & DESC_CS_MASK)) {
2360
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2361
        }
2362
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2363
        if (e2 & DESC_C_MASK) {
2364
            /* conforming code segment */
2365
            if (dpl > cpl) {
2366
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2367
            }
2368
        } else {
2369
            /* non conforming code segment */
2370
            rpl = new_cs & 3;
2371
            if (rpl > cpl) {
2372
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2373
            }
2374
            if (dpl != cpl) {
2375
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2376
            }
2377
        }
2378
        if (!(e2 & DESC_P_MASK)) {
2379
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2380
        }
2381
        limit = get_seg_limit(e1, e2);
2382
        if (new_eip > limit &&
2383
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
2384
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2385
        }
2386
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2387
                       get_seg_base(e1, e2), limit, e2);
2388
        EIP = new_eip;
2389
    } else {
2390
        /* jump to call or task gate */
2391
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2392
        rpl = new_cs & 3;
2393
        cpl = env->hflags & HF_CPL_MASK;
2394
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2395
        switch (type) {
2396
        case 1: /* 286 TSS */
2397
        case 9: /* 386 TSS */
2398
        case 5: /* task gate */
2399
            if (dpl < cpl || dpl < rpl) {
2400
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2401
            }
2402
            next_eip = env->eip + next_eip_addend;
2403
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2404
            CC_OP = CC_OP_EFLAGS;
2405
            break;
2406
        case 4: /* 286 call gate */
2407
        case 12: /* 386 call gate */
2408
            if ((dpl < cpl) || (dpl < rpl)) {
2409
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2410
            }
2411
            if (!(e2 & DESC_P_MASK)) {
2412
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2413
            }
2414
            gate_cs = e1 >> 16;
2415
            new_eip = (e1 & 0xffff);
2416
            if (type == 12) {
2417
                new_eip |= (e2 & 0xffff0000);
2418
            }
2419
            if (load_segment(&e1, &e2, gate_cs) != 0) {
2420
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2421
            }
2422
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2423
            /* must be code segment */
2424
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2425
                 (DESC_S_MASK | DESC_CS_MASK))) {
2426
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2427
            }
2428
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2429
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
2430
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2431
            }
2432
            if (!(e2 & DESC_P_MASK)) {
2433
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2434
            }
2435
            limit = get_seg_limit(e1, e2);
2436
            if (new_eip > limit) {
2437
                raise_exception_err(env, EXCP0D_GPF, 0);
2438
            }
2439
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2440
                                   get_seg_base(e1, e2), limit, e2);
2441
            EIP = new_eip;
2442
            break;
2443
        default:
2444
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2445
            break;
2446
        }
2447
    }
2448
}
2449

    
2450
/* real mode call */
2451
void helper_lcall_real(int new_cs, target_ulong new_eip1,
2452
                       int shift, int next_eip)
2453
{
2454
    int new_eip;
2455
    uint32_t esp, esp_mask;
2456
    target_ulong ssp;
2457

    
2458
    new_eip = new_eip1;
2459
    esp = ESP;
2460
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
2461
    ssp = env->segs[R_SS].base;
2462
    if (shift) {
2463
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2464
        PUSHL(ssp, esp, esp_mask, next_eip);
2465
    } else {
2466
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2467
        PUSHW(ssp, esp, esp_mask, next_eip);
2468
    }
2469

    
2470
    SET_ESP(esp, esp_mask);
2471
    env->eip = new_eip;
2472
    env->segs[R_CS].selector = new_cs;
2473
    env->segs[R_CS].base = (new_cs << 4);
2474
}
2475

    
2476
/* protected mode call */
2477
void helper_lcall_protected(int new_cs, target_ulong new_eip,
2478
                            int shift, int next_eip_addend)
2479
{
2480
    int new_stack, i;
2481
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2482
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2483
    uint32_t val, limit, old_sp_mask;
2484
    target_ulong ssp, old_ssp, next_eip;
2485

    
2486
    next_eip = env->eip + next_eip_addend;
2487
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2488
    LOG_PCALL_STATE(env);
2489
    if ((new_cs & 0xfffc) == 0) {
2490
        raise_exception_err(env, EXCP0D_GPF, 0);
2491
    }
2492
    if (load_segment(&e1, &e2, new_cs) != 0) {
2493
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2494
    }
2495
    cpl = env->hflags & HF_CPL_MASK;
2496
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2497
    if (e2 & DESC_S_MASK) {
2498
        if (!(e2 & DESC_CS_MASK)) {
2499
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2500
        }
2501
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2502
        if (e2 & DESC_C_MASK) {
2503
            /* conforming code segment */
2504
            if (dpl > cpl) {
2505
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2506
            }
2507
        } else {
2508
            /* non conforming code segment */
2509
            rpl = new_cs & 3;
2510
            if (rpl > cpl) {
2511
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2512
            }
2513
            if (dpl != cpl) {
2514
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2515
            }
2516
        }
2517
        if (!(e2 & DESC_P_MASK)) {
2518
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2519
        }
2520

    
2521
#ifdef TARGET_X86_64
2522
        /* XXX: check 16/32 bit cases in long mode */
2523
        if (shift == 2) {
2524
            target_ulong rsp;
2525

    
2526
            /* 64 bit case */
2527
            rsp = ESP;
2528
            PUSHQ(rsp, env->segs[R_CS].selector);
2529
            PUSHQ(rsp, next_eip);
2530
            /* from this point, not restartable */
2531
            ESP = rsp;
2532
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2533
                                   get_seg_base(e1, e2),
2534
                                   get_seg_limit(e1, e2), e2);
2535
            EIP = new_eip;
2536
        } else
2537
#endif
2538
        {
2539
            sp = ESP;
2540
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2541
            ssp = env->segs[R_SS].base;
2542
            if (shift) {
2543
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2544
                PUSHL(ssp, sp, sp_mask, next_eip);
2545
            } else {
2546
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2547
                PUSHW(ssp, sp, sp_mask, next_eip);
2548
            }
2549

    
2550
            limit = get_seg_limit(e1, e2);
2551
            if (new_eip > limit) {
2552
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2553
            }
2554
            /* from this point, not restartable */
2555
            SET_ESP(sp, sp_mask);
2556
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2557
                                   get_seg_base(e1, e2), limit, e2);
2558
            EIP = new_eip;
2559
        }
2560
    } else {
2561
        /* check gate type */
2562
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2563
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2564
        rpl = new_cs & 3;
2565
        switch (type) {
2566
        case 1: /* available 286 TSS */
2567
        case 9: /* available 386 TSS */
2568
        case 5: /* task gate */
2569
            if (dpl < cpl || dpl < rpl) {
2570
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2571
            }
2572
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2573
            CC_OP = CC_OP_EFLAGS;
2574
            return;
2575
        case 4: /* 286 call gate */
2576
        case 12: /* 386 call gate */
2577
            break;
2578
        default:
2579
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2580
            break;
2581
        }
2582
        shift = type >> 3;
2583

    
2584
        if (dpl < cpl || dpl < rpl) {
2585
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2586
        }
2587
        /* check valid bit */
2588
        if (!(e2 & DESC_P_MASK)) {
2589
            raise_exception_err(env, EXCP0B_NOSEG,  new_cs & 0xfffc);
2590
        }
2591
        selector = e1 >> 16;
2592
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2593
        param_count = e2 & 0x1f;
2594
        if ((selector & 0xfffc) == 0) {
2595
            raise_exception_err(env, EXCP0D_GPF, 0);
2596
        }
2597

    
2598
        if (load_segment(&e1, &e2, selector) != 0) {
2599
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2600
        }
2601
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
2602
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2603
        }
2604
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2605
        if (dpl > cpl) {
2606
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2607
        }
2608
        if (!(e2 & DESC_P_MASK)) {
2609
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2610
        }
2611

    
2612
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2613
            /* to inner privilege */
2614
            get_ss_esp_from_tss(&ss, &sp, dpl);
2615
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
2616
                      "\n",
2617
                      ss, sp, param_count, ESP);
2618
            if ((ss & 0xfffc) == 0) {
2619
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2620
            }
2621
            if ((ss & 3) != dpl) {
2622
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2623
            }
2624
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
2625
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2626
            }
2627
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2628
            if (ss_dpl != dpl) {
2629
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2630
            }
2631
            if (!(ss_e2 & DESC_S_MASK) ||
2632
                (ss_e2 & DESC_CS_MASK) ||
2633
                !(ss_e2 & DESC_W_MASK)) {
2634
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2635
            }
2636
            if (!(ss_e2 & DESC_P_MASK)) {
2637
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2638
            }
2639

    
2640
            /* push_size = ((param_count * 2) + 8) << shift; */
2641

    
2642
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2643
            old_ssp = env->segs[R_SS].base;
2644

    
2645
            sp_mask = get_sp_mask(ss_e2);
2646
            ssp = get_seg_base(ss_e1, ss_e2);
2647
            if (shift) {
2648
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2649
                PUSHL(ssp, sp, sp_mask, ESP);
2650
                for (i = param_count - 1; i >= 0; i--) {
2651
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2652
                    PUSHL(ssp, sp, sp_mask, val);
2653
                }
2654
            } else {
2655
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2656
                PUSHW(ssp, sp, sp_mask, ESP);
2657
                for (i = param_count - 1; i >= 0; i--) {
2658
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2659
                    PUSHW(ssp, sp, sp_mask, val);
2660
                }
2661
            }
2662
            new_stack = 1;
2663
        } else {
2664
            /* to same privilege */
2665
            sp = ESP;
2666
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
2667
            ssp = env->segs[R_SS].base;
2668
            /* push_size = (4 << shift); */
2669
            new_stack = 0;
2670
        }
2671

    
2672
        if (shift) {
2673
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2674
            PUSHL(ssp, sp, sp_mask, next_eip);
2675
        } else {
2676
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2677
            PUSHW(ssp, sp, sp_mask, next_eip);
2678
        }
2679

    
2680
        /* from this point, not restartable */
2681

    
2682
        if (new_stack) {
2683
            ss = (ss & ~3) | dpl;
2684
            cpu_x86_load_seg_cache(env, R_SS, ss,
2685
                                   ssp,
2686
                                   get_seg_limit(ss_e1, ss_e2),
2687
                                   ss_e2);
2688
        }
2689

    
2690
        selector = (selector & ~3) | dpl;
2691
        cpu_x86_load_seg_cache(env, R_CS, selector,
2692
                       get_seg_base(e1, e2),
2693
                       get_seg_limit(e1, e2),
2694
                       e2);
2695
        cpu_x86_set_cpl(env, dpl);
2696
        SET_ESP(sp, sp_mask);
2697
        EIP = offset;
2698
    }
2699
}
2700

    
2701
/* real and vm86 mode iret */
2702
void helper_iret_real(int shift)
2703
{
2704
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2705
    target_ulong ssp;
2706
    int eflags_mask;
2707

    
2708
    sp_mask = 0xffff; /* XXX: use SS segment size? */
2709
    sp = ESP;
2710
    ssp = env->segs[R_SS].base;
2711
    if (shift == 1) {
2712
        /* 32 bits */
2713
        POPL(ssp, sp, sp_mask, new_eip);
2714
        POPL(ssp, sp, sp_mask, new_cs);
2715
        new_cs &= 0xffff;
2716
        POPL(ssp, sp, sp_mask, new_eflags);
2717
    } else {
2718
        /* 16 bits */
2719
        POPW(ssp, sp, sp_mask, new_eip);
2720
        POPW(ssp, sp, sp_mask, new_cs);
2721
        POPW(ssp, sp, sp_mask, new_eflags);
2722
    }
2723
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2724
    env->segs[R_CS].selector = new_cs;
2725
    env->segs[R_CS].base = (new_cs << 4);
2726
    env->eip = new_eip;
2727
    if (env->eflags & VM_MASK) {
2728
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2729
            NT_MASK;
2730
    } else {
2731
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2732
            RF_MASK | NT_MASK;
2733
    }
2734
    if (shift == 0) {
2735
        eflags_mask &= 0xffff;
2736
    }
2737
    cpu_load_eflags(env, new_eflags, eflags_mask);
2738
    env->hflags2 &= ~HF2_NMI_MASK;
2739
}
2740

    
2741
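/* On return to an outer privilege level, data and non-conforming code
   segments whose DPL is below the new CPL are nullified so less
   privileged code cannot keep using them; FS/GS with a null selector are
   left untouched (see the note below). */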
static inline void validate_seg(int seg_reg, int cpl)
2742
{
2743
    int dpl;
2744
    uint32_t e2;
2745

    
2746
    /* XXX: on x86_64, we do not want to nullify FS and GS because
2747
       they may still contain a valid base. I would be interested to
2748
       know how a real x86_64 CPU behaves */
2749
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
2750
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
2751
        return;
2752
    }
2753

    
2754
    e2 = env->segs[seg_reg].flags;
2755
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2756
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2757
        /* data or non conforming code segment */
2758
        if (dpl < cpl) {
2759
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2760
        }
2761
    }
2762
}
2763

    
2764
/* protected mode iret */
2765
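/* Common code for far RET and IRET in protected mode: pop CS:EIP (and
   EFLAGS for IRET), check the target code segment, and when returning to
   an outer privilege level also pop SS:ESP, reload the stack and validate
   the data segment registers.  An IRET whose popped EFLAGS has VM set
   returns to virtual-8086 mode. */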
static inline void helper_ret_protected(int shift, int is_iret, int addend)
2766
{
2767
    uint32_t new_cs, new_eflags, new_ss;
2768
    uint32_t new_es, new_ds, new_fs, new_gs;
2769
    uint32_t e1, e2, ss_e1, ss_e2;
2770
    int cpl, dpl, rpl, eflags_mask, iopl;
2771
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2772

    
2773
#ifdef TARGET_X86_64
2774
    if (shift == 2) {
2775
        sp_mask = -1;
2776
    } else
2777
#endif
2778
    {
2779
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
2780
    }
2781
    sp = ESP;
2782
    ssp = env->segs[R_SS].base;
2783
    new_eflags = 0; /* avoid warning */
2784
#ifdef TARGET_X86_64
2785
    if (shift == 2) {
2786
        POPQ(sp, new_eip);
2787
        POPQ(sp, new_cs);
2788
        new_cs &= 0xffff;
2789
        if (is_iret) {
2790
            POPQ(sp, new_eflags);
2791
        }
2792
    } else
2793
#endif
2794
    {
2795
        if (shift == 1) {
2796
            /* 32 bits */
2797
            POPL(ssp, sp, sp_mask, new_eip);
2798
            POPL(ssp, sp, sp_mask, new_cs);
2799
            new_cs &= 0xffff;
2800
            if (is_iret) {
2801
                POPL(ssp, sp, sp_mask, new_eflags);
2802
                if (new_eflags & VM_MASK) {
2803
                    goto return_to_vm86;
2804
                }
2805
            }
2806
        } else {
2807
            /* 16 bits */
2808
            POPW(ssp, sp, sp_mask, new_eip);
2809
            POPW(ssp, sp, sp_mask, new_cs);
2810
            if (is_iret) {
2811
                POPW(ssp, sp, sp_mask, new_eflags);
2812
            }
2813
        }
2814
    }
2815
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2816
              new_cs, new_eip, shift, addend);
2817
    LOG_PCALL_STATE(env);
2818
    if ((new_cs & 0xfffc) == 0) {
2819
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2820
    }
2821
    if (load_segment(&e1, &e2, new_cs) != 0) {
2822
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2823
    }
2824
    if (!(e2 & DESC_S_MASK) ||
2825
        !(e2 & DESC_CS_MASK)) {
2826
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2827
    }
2828
    cpl = env->hflags & HF_CPL_MASK;
2829
    rpl = new_cs & 3;
2830
    if (rpl < cpl) {
2831
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2832
    }
2833
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2834
    if (e2 & DESC_C_MASK) {
2835
        if (dpl > rpl) {
2836
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2837
        }
2838
    } else {
2839
        if (dpl != rpl) {
2840
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2841
        }
2842
    }
2843
    if (!(e2 & DESC_P_MASK)) {
2844
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2845
    }
2846

    
2847
    sp += addend;
2848
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2849
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2850
        /* return to same privilege level */
2851
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2852
                       get_seg_base(e1, e2),
2853
                       get_seg_limit(e1, e2),
2854
                       e2);
2855
    } else {
2856
        /* return to different privilege level */
2857
#ifdef TARGET_X86_64
2858
        if (shift == 2) {
2859
            POPQ(sp, new_esp);
2860
            POPQ(sp, new_ss);
2861
            new_ss &= 0xffff;
2862
        } else
2863
#endif
2864
        {
2865
            if (shift == 1) {
2866
                /* 32 bits */
2867
                POPL(ssp, sp, sp_mask, new_esp);
2868
                POPL(ssp, sp, sp_mask, new_ss);
2869
                new_ss &= 0xffff;
2870
            } else {
2871
                /* 16 bits */
2872
                POPW(ssp, sp, sp_mask, new_esp);
2873
                POPW(ssp, sp, sp_mask, new_ss);
2874
            }
2875
        }
2876
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2877
                  new_ss, new_esp);
2878
        if ((new_ss & 0xfffc) == 0) {
2879
#ifdef TARGET_X86_64
2880
            /* NULL ss is allowed in long mode if cpl != 3 */
2881
            /* XXX: test CS64? */
2882
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2883
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
2884
                                       0, 0xffffffff,
2885
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2886
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2887
                                       DESC_W_MASK | DESC_A_MASK);
2888
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2889
            } else
2890
#endif
2891
            {
2892
                raise_exception_err(env, EXCP0D_GPF, 0);
2893
            }
2894
        } else {
2895
            if ((new_ss & 3) != rpl) {
2896
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2897
            }
2898
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
2899
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2900
            }
2901
            if (!(ss_e2 & DESC_S_MASK) ||
2902
                (ss_e2 & DESC_CS_MASK) ||
2903
                !(ss_e2 & DESC_W_MASK)) {
2904
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2905
            }
2906
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2907
            if (dpl != rpl) {
2908
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2909
            }
2910
            if (!(ss_e2 & DESC_P_MASK)) {
2911
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2912
            }
2913
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
2914
                                   get_seg_base(ss_e1, ss_e2),
2915
                                   get_seg_limit(ss_e1, ss_e2),
2916
                                   ss_e2);
2917
        }
2918

    
2919
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
2920
                       get_seg_base(e1, e2),
2921
                       get_seg_limit(e1, e2),
2922
                       e2);
2923
        cpu_x86_set_cpl(env, rpl);
2924
        sp = new_esp;
2925
#ifdef TARGET_X86_64
2926
        if (env->hflags & HF_CS64_MASK) {
2927
            sp_mask = -1;
2928
        } else
2929
#endif
2930
        {
2931
            sp_mask = get_sp_mask(ss_e2);
2932
        }
2933

    
2934
        /* validate data segments */
2935
        validate_seg(R_ES, rpl);
2936
        validate_seg(R_DS, rpl);
2937
        validate_seg(R_FS, rpl);
2938
        validate_seg(R_GS, rpl);
2939

    
2940
        sp += addend;
2941
    }
2942
    SET_ESP(sp, sp_mask);
2943
    env->eip = new_eip;
2944
    if (is_iret) {
2945
        /* NOTE: 'cpl' is the _old_ CPL */
2946
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2947
        if (cpl == 0) {
2948
            eflags_mask |= IOPL_MASK;
2949
        }
2950
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
2951
        if (cpl <= iopl) {
2952
            eflags_mask |= IF_MASK;
2953
        }
2954
        if (shift == 0) {
2955
            eflags_mask &= 0xffff;
2956
        }
2957
        cpu_load_eflags(env, new_eflags, eflags_mask);
2958
    }
2959
    return;
2960

    
2961
 return_to_vm86:
2962
    POPL(ssp, sp, sp_mask, new_esp);
2963
    POPL(ssp, sp, sp_mask, new_ss);
2964
    POPL(ssp, sp, sp_mask, new_es);
2965
    POPL(ssp, sp, sp_mask, new_ds);
2966
    POPL(ssp, sp, sp_mask, new_fs);
2967
    POPL(ssp, sp, sp_mask, new_gs);
2968

    
2969
    /* modify processor state */
2970
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2971
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2972
                    VIP_MASK);
2973
    load_seg_vm(R_CS, new_cs & 0xffff);
2974
    cpu_x86_set_cpl(env, 3);
2975
    load_seg_vm(R_SS, new_ss & 0xffff);
2976
    load_seg_vm(R_ES, new_es & 0xffff);
2977
    load_seg_vm(R_DS, new_ds & 0xffff);
2978
    load_seg_vm(R_FS, new_fs & 0xffff);
2979
    load_seg_vm(R_GS, new_gs & 0xffff);
2980

    
2981
    env->eip = new_eip & 0xffff;
2982
    ESP = new_esp;
2983
}
2984

    
2985
void helper_iret_protected(int shift, int next_eip)
2986
{
2987
    int tss_selector, type;
2988
    uint32_t e1, e2;
2989

    
2990
    /* specific case for TSS */
2991
    if (env->eflags & NT_MASK) {
2992
#ifdef TARGET_X86_64
2993
        if (env->hflags & HF_LMA_MASK) {
2994
            raise_exception_err(env, EXCP0D_GPF, 0);
2995
        }
2996
#endif
2997
        tss_selector = lduw_kernel(env->tr.base + 0);
2998
        if (tss_selector & 4) {
2999
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3000
        }
3001
        if (load_segment(&e1, &e2, tss_selector) != 0) {
3002
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3003
        }
3004
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
3005
        /* NOTE: we check both segment and busy TSS */
3006
        if (type != 3) {
3007
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
3008
        }
3009
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
3010
    } else {
3011
        helper_ret_protected(shift, 1, 0);
3012
    }
3013
    env->hflags2 &= ~HF2_NMI_MASK;
3014
}
3015

    
3016
void helper_lret_protected(int shift, int addend)
3017
{
3018
    helper_ret_protected(shift, 0, addend);
3019
}
3020

    
3021
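/* SYSENTER: fault with #GP if MSR_IA32_SYSENTER_CS is zero, then switch
   to CPL 0 with flat CS/SS derived from that MSR and continue at
   sysenter_eip with the stack at sysenter_esp. */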
void helper_sysenter(void)
3022
{
3023
    if (env->sysenter_cs == 0) {
3024
        raise_exception_err(env, EXCP0D_GPF, 0);
3025
    }
3026
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
3027
    cpu_x86_set_cpl(env, 0);
3028

    
3029
#ifdef TARGET_X86_64
3030
    if (env->hflags & HF_LMA_MASK) {
3031
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3032
                               0, 0xffffffff,
3033
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3034
                               DESC_S_MASK |
3035
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
3036
                               DESC_L_MASK);
3037
    } else
3038
#endif
3039
    {
3040
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
3041
                               0, 0xffffffff,
3042
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3043
                               DESC_S_MASK |
3044
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3045
    }
3046
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
3047
                           0, 0xffffffff,
3048
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3049
                           DESC_S_MASK |
3050
                           DESC_W_MASK | DESC_A_MASK);
3051
    ESP = env->sysenter_esp;
3052
    EIP = env->sysenter_eip;
3053
}
3054

    
3055
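/* SYSEXIT: only legal at CPL 0 with MSR_IA32_SYSENTER_CS set up; return
   to CPL 3 with CS/SS derived from that MSR (+16/+24, or +32/+40 for a
   64-bit return), ESP = ECX and EIP = EDX. */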
void helper_sysexit(int dflag)
3056
{
3057
    int cpl;
3058

    
3059
    cpl = env->hflags & HF_CPL_MASK;
3060
    if (env->sysenter_cs == 0 || cpl != 0) {
3061
        raise_exception_err(env, EXCP0D_GPF, 0);
3062
    }
3063
    cpu_x86_set_cpl(env, 3);
3064
#ifdef TARGET_X86_64
3065
    if (dflag == 2) {
3066
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
3067
                               3, 0, 0xffffffff,
3068
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3069
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3070
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
3071
                               DESC_L_MASK);
3072
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
3073
                               3, 0, 0xffffffff,
3074
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3075
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3076
                               DESC_W_MASK | DESC_A_MASK);
3077
    } else
3078
#endif
3079
    {
3080
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
3081
                               3, 0, 0xffffffff,
3082
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3083
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3084
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3085
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
3086
                               3, 0, 0xffffffff,
3087
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3088
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3089
                               DESC_W_MASK | DESC_A_MASK);
3090
    }
3091
    ESP = ECX;
3092
    EIP = EDX;
3093
}
3094

    
3095
#if defined(CONFIG_USER_ONLY)
3096
target_ulong helper_read_crN(int reg)
3097
{
3098
    return 0;
3099
}
3100

    
3101
void helper_write_crN(int reg, target_ulong t0)
3102
{
3103
}
3104

    
3105
void helper_movl_drN_T0(int reg, target_ulong t0)
3106
{
3107
}
3108
#else
3109
target_ulong helper_read_crN(int reg)
3110
{
3111
    target_ulong val;
3112

    
3113
    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3114
    switch (reg) {
3115
    default:
3116
        val = env->cr[reg];
3117
        break;
3118
    case 8:
3119
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3120
            val = cpu_get_apic_tpr(env->apic_state);
3121
        } else {
3122
            val = env->v_tpr;
3123
        }
3124
        break;
3125
    }
3126
    return val;
3127
}
3128

    
3129
void helper_write_crN(int reg, target_ulong t0)
3130
{
3131
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3132
    switch (reg) {
3133
    case 0:
3134
        cpu_x86_update_cr0(env, t0);
3135
        break;
3136
    case 3:
3137
        cpu_x86_update_cr3(env, t0);
3138
        break;
3139
    case 4:
3140
        cpu_x86_update_cr4(env, t0);
3141
        break;
3142
    case 8:
3143
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3144
            cpu_set_apic_tpr(env->apic_state, t0);
3145
        }
3146
        env->v_tpr = t0 & 0x0f;
3147
        break;
3148
    default:
3149
        env->cr[reg] = t0;
3150
        break;
3151
    }
3152
}
3153

    
3154
void helper_movl_drN_T0(int reg, target_ulong t0)
3155
{
3156
    int i;
3157

    
3158
    if (reg < 4) {
3159
        hw_breakpoint_remove(env, reg);
3160
        env->dr[reg] = t0;
3161
        hw_breakpoint_insert(env, reg);
3162
    } else if (reg == 7) {
3163
        for (i = 0; i < 4; i++) {
3164
            hw_breakpoint_remove(env, i);
3165
        }
3166
        env->dr[7] = t0;
3167
        for (i = 0; i < 4; i++) {
3168
            hw_breakpoint_insert(env, i);
3169
        }
3170
    } else {
3171
        env->dr[reg] = t0;
3172
    }
3173
}
3174
#endif

void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
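
/*
 * Minimal sketch (illustrative only) of the EDX:EAX split used by RDTSC
 * above and by RDMSR/WRMSR below; the helper name is made up for this
 * example and is not part of the emulator.
 */
#if 0
static void split_u64(uint64_t val, uint32_t *lo, uint32_t *hi)
{
    *lo = (uint32_t)val;            /* returned in EAX */
    *hi = (uint32_t)(val >> 32);    /* returned in EDX */
}
#endif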

    
3210
void helper_rdtscp(void)
3211
{
3212
    helper_rdtsc();
3213
    ECX = (uint32_t)(env->tsc_aux);
3214
}
3215

    
3216
void helper_rdpmc(void)
3217
{
3218
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3219
        raise_exception(env, EXCP0D_GPF);
3220
    }
3221
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3222

    
3223
    /* currently unimplemented */
3224
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
3225
    raise_exception_err(env, EXCP06_ILLOP, 0);
3226
}
3227

    
3228
#if defined(CONFIG_USER_ONLY)
3229
void helper_wrmsr(void)
3230
{
3231
}
3232

    
3233
void helper_rdmsr(void)
3234
{
3235
}
3236
#else
3237
void helper_wrmsr(void)
3238
{
3239
    uint64_t val;
3240

    
3241
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3242

    
3243
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3244

    
3245
    switch ((uint32_t)ECX) {
3246
    case MSR_IA32_SYSENTER_CS:
3247
        env->sysenter_cs = val & 0xffff;
3248
        break;
3249
    case MSR_IA32_SYSENTER_ESP:
3250
        env->sysenter_esp = val;
3251
        break;
3252
    case MSR_IA32_SYSENTER_EIP:
3253
        env->sysenter_eip = val;
3254
        break;
3255
    case MSR_IA32_APICBASE:
3256
        cpu_set_apic_base(env->apic_state, val);
3257
        break;
3258
    case MSR_EFER:
3259
        {
3260
            uint64_t update_mask;
3261

    
3262
            update_mask = 0;
3263
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
3264
                update_mask |= MSR_EFER_SCE;
3265
            }
3266
            if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
3267
                update_mask |= MSR_EFER_LME;
3268
            }
3269
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
3270
                update_mask |= MSR_EFER_FFXSR;
3271
            }
3272
            if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
3273
                update_mask |= MSR_EFER_NXE;
3274
            }
3275
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
3276
                update_mask |= MSR_EFER_SVME;
3277
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
3282
                          (val & update_mask));
3283
        }
3284
        break;
3285
    case MSR_STAR:
3286
        env->star = val;
3287
        break;
3288
    case MSR_PAT:
3289
        env->pat = val;
3290
        break;
3291
    case MSR_VM_HSAVE_PA:
3292
        env->vm_hsave = val;
3293
        break;
3294
#ifdef TARGET_X86_64
3295
    case MSR_LSTAR:
3296
        env->lstar = val;
3297
        break;
3298
    case MSR_CSTAR:
3299
        env->cstar = val;
3300
        break;
3301
    case MSR_FMASK:
3302
        env->fmask = val;
3303
        break;
3304
    case MSR_FSBASE:
3305
        env->segs[R_FS].base = val;
3306
        break;
3307
    case MSR_GSBASE:
3308
        env->segs[R_GS].base = val;
3309
        break;
3310
    case MSR_KERNELGSBASE:
3311
        env->kernelgsbase = val;
3312
        break;
3313
#endif
3314
    case MSR_MTRRphysBase(0):
3315
    case MSR_MTRRphysBase(1):
3316
    case MSR_MTRRphysBase(2):
3317
    case MSR_MTRRphysBase(3):
3318
    case MSR_MTRRphysBase(4):
3319
    case MSR_MTRRphysBase(5):
3320
    case MSR_MTRRphysBase(6):
3321
    case MSR_MTRRphysBase(7):
3322
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3323
        break;
3324
    case MSR_MTRRphysMask(0):
3325
    case MSR_MTRRphysMask(1):
3326
    case MSR_MTRRphysMask(2):
3327
    case MSR_MTRRphysMask(3):
3328
    case MSR_MTRRphysMask(4):
3329
    case MSR_MTRRphysMask(5):
3330
    case MSR_MTRRphysMask(6):
3331
    case MSR_MTRRphysMask(7):
3332
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3333
        break;
3334
    case MSR_MTRRfix64K_00000:
3335
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3336
        break;
3337
    case MSR_MTRRfix16K_80000:
3338
    case MSR_MTRRfix16K_A0000:
3339
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3340
        break;
3341
    case MSR_MTRRfix4K_C0000:
3342
    case MSR_MTRRfix4K_C8000:
3343
    case MSR_MTRRfix4K_D0000:
3344
    case MSR_MTRRfix4K_D8000:
3345
    case MSR_MTRRfix4K_E0000:
3346
    case MSR_MTRRfix4K_E8000:
3347
    case MSR_MTRRfix4K_F0000:
3348
    case MSR_MTRRfix4K_F8000:
3349
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3350
        break;
3351
    case MSR_MTRRdefType:
3352
        env->mtrr_deftype = val;
3353
        break;
3354
    case MSR_MCG_STATUS:
3355
        env->mcg_status = val;
3356
        break;
3357
    case MSR_MCG_CTL:
3358
        if ((env->mcg_cap & MCG_CTL_P)
3359
            && (val == 0 || val == ~(uint64_t)0)) {
3360
            env->mcg_ctl = val;
3361
        }
3362
        break;
3363
    case MSR_TSC_AUX:
3364
        env->tsc_aux = val;
3365
        break;
3366
    case MSR_IA32_MISC_ENABLE:
3367
        env->msr_ia32_misc_enable = val;
3368
        break;
3369
    default:
3370
        if ((uint32_t)ECX >= MSR_MC0_CTL
3371
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3372
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3373
            if ((offset & 0x3) != 0
3374
                || (val == 0 || val == ~(uint64_t)0)) {
3375
                env->mce_banks[offset] = val;
3376
            }
3377
            break;
3378
        }
3379
        /* XXX: exception? */
3380
        break;
3381
    }
3382
}
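
/*
 * Sketch of the machine-check MSR layout assumed by the default case in
 * helper_wrmsr()/helper_rdmsr(): each bank owns four consecutive MSRs
 * starting at MSR_MC0_CTL.  The function and parameter names are invented
 * for this illustration.
 */
#if 0
static void decode_mce_bank_msr(uint32_t msr, uint32_t *bank, uint32_t *reg)
{
    uint32_t offset = msr - MSR_MC0_CTL;

    *bank = offset / 4;     /* MCi bank index */
    *reg  = offset % 4;     /* 0=CTL, 1=STATUS, 2=ADDR, 3=MISC */
}
#endif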

    
3384
void helper_rdmsr(void)
3385
{
3386
    uint64_t val;
3387

    
3388
    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3389

    
3390
    switch ((uint32_t)ECX) {
3391
    case MSR_IA32_SYSENTER_CS:
3392
        val = env->sysenter_cs;
3393
        break;
3394
    case MSR_IA32_SYSENTER_ESP:
3395
        val = env->sysenter_esp;
3396
        break;
3397
    case MSR_IA32_SYSENTER_EIP:
3398
        val = env->sysenter_eip;
3399
        break;
3400
    case MSR_IA32_APICBASE:
3401
        val = cpu_get_apic_base(env->apic_state);
3402
        break;
3403
    case MSR_EFER:
3404
        val = env->efer;
3405
        break;
3406
    case MSR_STAR:
3407
        val = env->star;
3408
        break;
3409
    case MSR_PAT:
3410
        val = env->pat;
3411
        break;
3412
    case MSR_VM_HSAVE_PA:
3413
        val = env->vm_hsave;
3414
        break;
3415
    case MSR_IA32_PERF_STATUS:
3416
        /* tsc_increment_by_tick */
3417
        val = 1000ULL;
3418
        /* CPU multiplier */
3419
        val |= (((uint64_t)4ULL) << 40);
3420
        break;
3421
#ifdef TARGET_X86_64
3422
    case MSR_LSTAR:
3423
        val = env->lstar;
3424
        break;
3425
    case MSR_CSTAR:
3426
        val = env->cstar;
3427
        break;
3428
    case MSR_FMASK:
3429
        val = env->fmask;
3430
        break;
3431
    case MSR_FSBASE:
3432
        val = env->segs[R_FS].base;
3433
        break;
3434
    case MSR_GSBASE:
3435
        val = env->segs[R_GS].base;
3436
        break;
3437
    case MSR_KERNELGSBASE:
3438
        val = env->kernelgsbase;
3439
        break;
3440
    case MSR_TSC_AUX:
3441
        val = env->tsc_aux;
3442
        break;
3443
#endif
3444
    case MSR_MTRRphysBase(0):
3445
    case MSR_MTRRphysBase(1):
3446
    case MSR_MTRRphysBase(2):
3447
    case MSR_MTRRphysBase(3):
3448
    case MSR_MTRRphysBase(4):
3449
    case MSR_MTRRphysBase(5):
3450
    case MSR_MTRRphysBase(6):
3451
    case MSR_MTRRphysBase(7):
3452
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3453
        break;
3454
    case MSR_MTRRphysMask(0):
3455
    case MSR_MTRRphysMask(1):
3456
    case MSR_MTRRphysMask(2):
3457
    case MSR_MTRRphysMask(3):
3458
    case MSR_MTRRphysMask(4):
3459
    case MSR_MTRRphysMask(5):
3460
    case MSR_MTRRphysMask(6):
3461
    case MSR_MTRRphysMask(7):
3462
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3463
        break;
3464
    case MSR_MTRRfix64K_00000:
3465
        val = env->mtrr_fixed[0];
3466
        break;
3467
    case MSR_MTRRfix16K_80000:
3468
    case MSR_MTRRfix16K_A0000:
3469
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3470
        break;
3471
    case MSR_MTRRfix4K_C0000:
3472
    case MSR_MTRRfix4K_C8000:
3473
    case MSR_MTRRfix4K_D0000:
3474
    case MSR_MTRRfix4K_D8000:
3475
    case MSR_MTRRfix4K_E0000:
3476
    case MSR_MTRRfix4K_E8000:
3477
    case MSR_MTRRfix4K_F0000:
3478
    case MSR_MTRRfix4K_F8000:
3479
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3480
        break;
3481
    case MSR_MTRRdefType:
3482
        val = env->mtrr_deftype;
3483
        break;
3484
    case MSR_MTRRcap:
3485
        if (env->cpuid_features & CPUID_MTRR) {
3486
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
3487
                MSR_MTRRcap_WC_SUPPORTED;
3488
        } else {
3489
            /* XXX: exception? */
3490
            val = 0;
3491
        }
3492
        break;
3493
    case MSR_MCG_CAP:
3494
        val = env->mcg_cap;
3495
        break;
3496
    case MSR_MCG_CTL:
3497
        if (env->mcg_cap & MCG_CTL_P) {
3498
            val = env->mcg_ctl;
3499
        } else {
3500
            val = 0;
3501
        }
3502
        break;
3503
    case MSR_MCG_STATUS:
3504
        val = env->mcg_status;
3505
        break;
3506
    case MSR_IA32_MISC_ENABLE:
3507
        val = env->msr_ia32_misc_enable;
3508
        break;
3509
    default:
3510
        if ((uint32_t)ECX >= MSR_MC0_CTL
3511
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3512
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3513
            val = env->mce_banks[offset];
3514
            break;
3515
        }
3516
        /* XXX: exception? */
3517
        val = 0;
3518
        break;
3519
    }
3520
    EAX = (uint32_t)(val);
3521
    EDX = (uint32_t)(val >> 32);
3522
}
3523
#endif
3524

    
3525
target_ulong helper_lsl(target_ulong selector1)
3526
{
3527
    unsigned int limit;
3528
    uint32_t e1, e2, eflags, selector;
3529
    int rpl, dpl, cpl, type;
3530

    
3531
    selector = selector1 & 0xffff;
3532
    eflags = helper_cc_compute_all(CC_OP);
3533
    if ((selector & 0xfffc) == 0) {
3534
        goto fail;
3535
    }
3536
    if (load_segment(&e1, &e2, selector) != 0) {
3537
        goto fail;
3538
    }
3539
    rpl = selector & 3;
3540
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3541
    cpl = env->hflags & HF_CPL_MASK;
3542
    if (e2 & DESC_S_MASK) {
3543
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3544
            /* conforming */
3545
        } else {
3546
            if (dpl < cpl || dpl < rpl) {
3547
                goto fail;
3548
            }
3549
        }
3550
    } else {
3551
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3552
        switch (type) {
3553
        case 1:
3554
        case 2:
3555
        case 3:
3556
        case 9:
3557
        case 11:
3558
            break;
3559
        default:
3560
            goto fail;
3561
        }
3562
        if (dpl < cpl || dpl < rpl) {
3563
        fail:
3564
            CC_SRC = eflags & ~CC_Z;
3565
            return 0;
3566
        }
3567
    }
3568
    limit = get_seg_limit(e1, e2);
3569
    CC_SRC = eflags | CC_Z;
3570
    return limit;
3571
}
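
/*
 * The privilege test shared by LSL above and by LAR/VERR/VERW below, spelled
 * out as a stand-alone predicate for illustration (not used by the code):
 * for non-conforming and system descriptors the selector is only visible
 * when the descriptor DPL is numerically >= both CPL and RPL; otherwise the
 * helpers clear ZF and return.
 */
#if 0
static int descriptor_not_visible(int dpl, int cpl, int rpl)
{
    return dpl < cpl || dpl < rpl;
}
#endif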

    
3573
target_ulong helper_lar(target_ulong selector1)
3574
{
3575
    uint32_t e1, e2, eflags, selector;
3576
    int rpl, dpl, cpl, type;
3577

    
3578
    selector = selector1 & 0xffff;
3579
    eflags = helper_cc_compute_all(CC_OP);
3580
    if ((selector & 0xfffc) == 0) {
3581
        goto fail;
3582
    }
3583
    if (load_segment(&e1, &e2, selector) != 0) {
3584
        goto fail;
3585
    }
3586
    rpl = selector & 3;
3587
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3588
    cpl = env->hflags & HF_CPL_MASK;
3589
    if (e2 & DESC_S_MASK) {
3590
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3591
            /* conforming */
3592
        } else {
3593
            if (dpl < cpl || dpl < rpl) {
3594
                goto fail;
3595
            }
3596
        }
3597
    } else {
3598
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3599
        switch (type) {
3600
        case 1:
3601
        case 2:
3602
        case 3:
3603
        case 4:
3604
        case 5:
3605
        case 9:
3606
        case 11:
3607
        case 12:
3608
            break;
3609
        default:
3610
            goto fail;
3611
        }
3612
        if (dpl < cpl || dpl < rpl) {
3613
        fail:
3614
            CC_SRC = eflags & ~CC_Z;
3615
            return 0;
3616
        }
3617
    }
3618
    CC_SRC = eflags | CC_Z;
3619
    return e2 & 0x00f0ff00;
3620
}
3621

    
3622
void helper_verr(target_ulong selector1)
3623
{
3624
    uint32_t e1, e2, eflags, selector;
3625
    int rpl, dpl, cpl;
3626

    
3627
    selector = selector1 & 0xffff;
3628
    eflags = helper_cc_compute_all(CC_OP);
3629
    if ((selector & 0xfffc) == 0) {
3630
        goto fail;
3631
    }
3632
    if (load_segment(&e1, &e2, selector) != 0) {
3633
        goto fail;
3634
    }
3635
    if (!(e2 & DESC_S_MASK)) {
3636
        goto fail;
3637
    }
3638
    rpl = selector & 3;
3639
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3640
    cpl = env->hflags & HF_CPL_MASK;
3641
    if (e2 & DESC_CS_MASK) {
3642
        if (!(e2 & DESC_R_MASK)) {
3643
            goto fail;
3644
        }
3645
        if (!(e2 & DESC_C_MASK)) {
3646
            if (dpl < cpl || dpl < rpl) {
3647
                goto fail;
3648
            }
3649
        }
3650
    } else {
3651
        if (dpl < cpl || dpl < rpl) {
3652
        fail:
3653
            CC_SRC = eflags & ~CC_Z;
3654
            return;
3655
        }
3656
    }
3657
    CC_SRC = eflags | CC_Z;
3658
}
3659

    
3660
void helper_verw(target_ulong selector1)
3661
{
3662
    uint32_t e1, e2, eflags, selector;
3663
    int rpl, dpl, cpl;
3664

    
3665
    selector = selector1 & 0xffff;
3666
    eflags = helper_cc_compute_all(CC_OP);
3667
    if ((selector & 0xfffc) == 0) {
3668
        goto fail;
3669
    }
3670
    if (load_segment(&e1, &e2, selector) != 0) {
3671
        goto fail;
3672
    }
3673
    if (!(e2 & DESC_S_MASK)) {
3674
        goto fail;
3675
    }
3676
    rpl = selector & 3;
3677
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3678
    cpl = env->hflags & HF_CPL_MASK;
3679
    if (e2 & DESC_CS_MASK) {
3680
        goto fail;
3681
    } else {
3682
        if (dpl < cpl || dpl < rpl) {
3683
            goto fail;
3684
        }
3685
        if (!(e2 & DESC_W_MASK)) {
3686
        fail:
3687
            CC_SRC = eflags & ~CC_Z;
3688
            return;
3689
        }
3690
    }
3691
    CC_SRC = eflags | CC_Z;
3692
}
3693

    
3694
#if defined(CONFIG_USER_ONLY)
3695
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
3696
{
3697
    CPUX86State *saved_env;
3698

    
3699
    saved_env = env;
3700
    env = s;
3701
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
3702
        selector &= 0xffff;
3703
        cpu_x86_load_seg_cache(env, seg_reg, selector,
3704
                               (selector << 4), 0xffff, 0);
3705
    } else {
3706
        helper_load_seg(seg_reg, selector);
3707
    }
3708
    env = saved_env;
3709
}
3710
#endif
3711

    
3712
#ifdef TARGET_X86_64
3713
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3714
{
3715
    *plow += a;
3716
    /* carry test */
3717
    if (*plow < a) {
3718
        (*phigh)++;
3719
    }
3720
    *phigh += b;
3721
}
3722

    
3723
static void neg128(uint64_t *plow, uint64_t *phigh)
3724
{
3725
    *plow = ~*plow;
3726
    *phigh = ~*phigh;
3727
    add128(plow, phigh, 1, 0);
3728
}
3729

    
3730
/* return TRUE if overflow */
3731
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3732
{
3733
    uint64_t q, r, a1, a0;
3734
    int i, qb, ab;
3735

    
3736
    a0 = *plow;
3737
    a1 = *phigh;
3738
    if (a1 == 0) {
3739
        q = a0 / b;
3740
        r = a0 % b;
3741
        *plow = q;
3742
        *phigh = r;
3743
    } else {
3744
        if (a1 >= b) {
3745
            return 1;
3746
        }
3747
        /* XXX: use a better algorithm */
3748
        for (i = 0; i < 64; i++) {
3749
            ab = a1 >> 63;
3750
            a1 = (a1 << 1) | (a0 >> 63);
3751
            if (ab || a1 >= b) {
3752
                a1 -= b;
3753
                qb = 1;
3754
            } else {
3755
                qb = 0;
3756
            }
3757
            a0 = (a0 << 1) | qb;
3758
        }
3759
#if defined(DEBUG_MULDIV)
3760
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
3761
               ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3762
               *phigh, *plow, b, a0, a1);
3763
#endif
3764
        *plow = a0;
3765
        *phigh = a1;
3766
    }
3767
    return 0;
3768
}
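
/*
 * Usage sketch for div64() above, with illustrative values: on entry
 * *plow/*phigh hold the low/high halves of the 128-bit dividend; on a zero
 * return *plow is the quotient and *phigh the remainder, matching what DIV
 * leaves in RAX and RDX.
 */
#if 0
static void div64_example(void)
{
    uint64_t lo = 100, hi = 0;

    if (div64(&lo, &hi, 7) == 0) {
        /* now lo == 14 (quotient) and hi == 2 (remainder) */
    }
}
#endif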

    
3770
/* return TRUE if overflow */
3771
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3772
{
3773
    int sa, sb;
3774

    
3775
    sa = ((int64_t)*phigh < 0);
3776
    if (sa) {
3777
        neg128(plow, phigh);
3778
    }
3779
    sb = (b < 0);
3780
    if (sb) {
3781
        b = -b;
3782
    }
3783
    if (div64(plow, phigh, b) != 0) {
3784
        return 1;
3785
    }
3786
    if (sa ^ sb) {
3787
        if (*plow > (1ULL << 63)) {
3788
            return 1;
3789
        }
3790
        *plow = -*plow;
3791
    } else {
3792
        if (*plow >= (1ULL << 63)) {
3793
            return 1;
3794
        }
3795
    }
3796
    if (sa) {
3797
        *phigh = -*phigh;
3798
    }
3799
    return 0;
3800
}
3801

    
3802
void helper_mulq_EAX_T0(target_ulong t0)
3803
{
3804
    uint64_t r0, r1;
3805

    
3806
    mulu64(&r0, &r1, EAX, t0);
3807
    EAX = r0;
3808
    EDX = r1;
3809
    CC_DST = r0;
3810
    CC_SRC = r1;
3811
}
3812

    
3813
void helper_imulq_EAX_T0(target_ulong t0)
3814
{
3815
    uint64_t r0, r1;
3816

    
3817
    muls64(&r0, &r1, EAX, t0);
3818
    EAX = r0;
3819
    EDX = r1;
3820
    CC_DST = r0;
3821
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3822
}
3823

    
3824
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
3825
{
3826
    uint64_t r0, r1;
3827

    
3828
    muls64(&r0, &r1, t0, t1);
3829
    CC_DST = r0;
3830
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3831
    return r0;
3832
}
3833

    
3834
void helper_divq_EAX(target_ulong t0)
3835
{
3836
    uint64_t r0, r1;
3837

    
3838
    if (t0 == 0) {
3839
        raise_exception(env, EXCP00_DIVZ);
3840
    }
3841
    r0 = EAX;
3842
    r1 = EDX;
3843
    if (div64(&r0, &r1, t0)) {
3844
        raise_exception(env, EXCP00_DIVZ);
3845
    }
3846
    EAX = r0;
3847
    EDX = r1;
3848
}
3849

    
3850
void helper_idivq_EAX(target_ulong t0)
3851
{
3852
    uint64_t r0, r1;
3853

    
3854
    if (t0 == 0) {
3855
        raise_exception(env, EXCP00_DIVZ);
3856
    }
3857
    r0 = EAX;
3858
    r1 = EDX;
3859
    if (idiv64(&r0, &r1, t0)) {
3860
        raise_exception(env, EXCP00_DIVZ);
3861
    }
3862
    EAX = r0;
3863
    EDX = r1;
3864
}
3865
#endif
3866

    
3867
static void do_hlt(void)
3868
{
3869
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3870
    env->halted = 1;
3871
    env->exception_index = EXCP_HLT;
3872
    cpu_loop_exit(env);
3873
}
3874

    
3875
void helper_hlt(int next_eip_addend)
3876
{
3877
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
3878
    EIP += next_eip_addend;
3879

    
3880
    do_hlt();
3881
}
3882

    
3883
void helper_monitor(target_ulong ptr)
3884
{
3885
    if ((uint32_t)ECX != 0) {
3886
        raise_exception(env, EXCP0D_GPF);
3887
    }
3888
    /* XXX: store address? */
3889
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
3890
}
3891

    
3892
void helper_mwait(int next_eip_addend)
3893
{
3894
    if ((uint32_t)ECX != 0) {
3895
        raise_exception(env, EXCP0D_GPF);
3896
    }
3897
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
3898
    EIP += next_eip_addend;
3899

    
3900
    /* XXX: not complete but not completely erroneous */
3901
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
3902
        /* more than one CPU: do not sleep because another CPU may
3903
           wake this one */
3904
    } else {
3905
        do_hlt();
3906
    }
3907
}
3908

    
3909
void helper_debug(void)
3910
{
3911
    env->exception_index = EXCP_DEBUG;
3912
    cpu_loop_exit(env);
3913
}
3914

    
3915
void helper_reset_rf(void)
3916
{
3917
    env->eflags &= ~RF_MASK;
3918
}
3919

    
3920
void helper_cli(void)
3921
{
3922
    env->eflags &= ~IF_MASK;
3923
}
3924

    
3925
void helper_sti(void)
3926
{
3927
    env->eflags |= IF_MASK;
3928
}
3929

    
3930
#if 0
3931
/* vm86plus instructions */
3932
void helper_cli_vm(void)
3933
{
3934
    env->eflags &= ~VIF_MASK;
3935
}
3936

3937
void helper_sti_vm(void)
3938
{
3939
    env->eflags |= VIF_MASK;
3940
    if (env->eflags & VIP_MASK) {
3941
        raise_exception(env, EXCP0D_GPF);
3942
    }
3943
}
3944
#endif
3945

    
3946
void helper_set_inhibit_irq(void)
3947
{
3948
    env->hflags |= HF_INHIBIT_IRQ_MASK;
3949
}
3950

    
3951
void helper_reset_inhibit_irq(void)
3952
{
3953
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
3954
}
3955

    
3956
void helper_boundw(target_ulong a0, int v)
3957
{
3958
    int low, high;
3959

    
3960
    low = ldsw(a0);
3961
    high = ldsw(a0 + 2);
3962
    v = (int16_t)v;
3963
    if (v < low || v > high) {
3964
        raise_exception(env, EXCP05_BOUND);
3965
    }
3966
}
3967

    
3968
void helper_boundl(target_ulong a0, int v)
3969
{
3970
    int low, high;
3971

    
3972
    low = ldl(a0);
3973
    high = ldl(a0 + 4);
3974
    if (v < low || v > high) {
3975
        raise_exception(env, EXCP05_BOUND);
3976
    }
3977
}
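
/*
 * Memory layout assumed by helper_boundw()/helper_boundl() above: the BOUND
 * operand points at a pair of signed limits, lower bound first.  These
 * structs are purely illustrative and not used by the emulator.
 */
#if 0
struct bound16 {
    int16_t lower;      /* read by ldsw(a0) */
    int16_t upper;      /* read by ldsw(a0 + 2) */
};

struct bound32 {
    int32_t lower;      /* read by ldl(a0) */
    int32_t upper;      /* read by ldl(a0 + 4) */
};
#endif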

    
3979
#if !defined(CONFIG_USER_ONLY)
3980

    
3981
#define MMUSUFFIX _mmu
3982

    
3983
#define SHIFT 0
3984
#include "softmmu_template.h"
3985

    
3986
#define SHIFT 1
3987
#include "softmmu_template.h"
3988

    
3989
#define SHIFT 2
3990
#include "softmmu_template.h"
3991

    
3992
#define SHIFT 3
3993
#include "softmmu_template.h"
3994

    
3995
#endif
3996

    
3997
#if !defined(CONFIG_USER_ONLY)
3998
/* try to fill the TLB and return an exception if error. If retaddr is
3999
   NULL, it means that the function was called in C code (i.e. not
4000
   from generated code or from helper.c) */
4001
/* XXX: fix it to restore all registers */
4002
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
4003
              uintptr_t retaddr)
4004
{
4005
    TranslationBlock *tb;
4006
    int ret;
4007
    CPUX86State *saved_env;
4008

    
4009
    saved_env = env;
4010
    env = env1;
4011

    
4012
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
4013
    if (ret) {
4014
        if (retaddr) {
4015
            /* now we have a real cpu fault */
4016
            tb = tb_find_pc(retaddr);
4017
            if (tb) {
4018
                /* the PC is inside the translated code. It means that we have
4019
                   a virtual CPU fault */
4020
                cpu_restore_state(tb, env, retaddr);
4021
            }
4022
        }
4023
        raise_exception_err(env, env->exception_index, env->error_code);
4024
    }
4025
    env = saved_env;
4026
}
4027
#endif
4028

    
4029
/* Secure Virtual Machine helpers */
4030

    
4031
#if defined(CONFIG_USER_ONLY)
4032

    
4033
void helper_vmrun(int aflag, int next_eip_addend)
4034
{
4035
}
4036

    
4037
void helper_vmmcall(void)
4038
{
4039
}
4040

    
4041
void helper_vmload(int aflag)
4042
{
4043
}
4044

    
4045
void helper_vmsave(int aflag)
4046
{
4047
}
4048

    
4049
void helper_stgi(void)
4050
{
4051
}
4052

    
4053
void helper_clgi(void)
4054
{
4055
}
4056

    
4057
void helper_skinit(void)
4058
{
4059
}
4060

    
4061
void helper_invlpga(int aflag)
4062
{
4063
}
4064

    
4065
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4066
{
4067
}
4068

    
4069
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
4070
{
4071
}
4072

    
4073
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4074
{
4075
}
4076

    
4077
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
4078
                                   uint64_t param)
4079
{
4080
}
4081

    
4082
void helper_svm_check_io(uint32_t port, uint32_t param,
4083
                         uint32_t next_eip_addend)
4084
{
4085
}
4086
#else
4087

    
4088
static inline void svm_save_seg(target_phys_addr_t addr,
4089
                                const SegmentCache *sc)
4090
{
4091
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
4092
             sc->selector);
4093
    stq_phys(addr + offsetof(struct vmcb_seg, base),
4094
             sc->base);
4095
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
4096
             sc->limit);
4097
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4098
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4099
}
4100

    
4101
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4102
{
4103
    unsigned int flags;
4104

    
4105
    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4106
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4107
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4108
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4109
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4110
}
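
/*
 * Bit shuffling performed by svm_save_seg()/svm_load_seg() above, written
 * out as a pair of pure functions for illustration (names invented): the
 * VMCB keeps a 12-bit attribute field, built from segment flags bits 15:8
 * (type, S, DPL, P -> attrib 7:0) and bits 23:20 (AVL, L, D/B, G ->
 * attrib 11:8).
 */
#if 0
static uint16_t seg_flags_to_vmcb_attrib(uint32_t flags)
{
    return ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
}

static uint32_t vmcb_attrib_to_seg_flags(uint16_t attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}
#endif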

    
4112
static inline void svm_load_seg_cache(target_phys_addr_t addr,
4113
                                      CPUX86State *env, int seg_reg)
4114
{
4115
    SegmentCache sc1, *sc = &sc1;
4116

    
4117
    svm_load_seg(addr, sc);
4118
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4119
                           sc->base, sc->limit, sc->flags);
4120
}
4121

    
4122
void helper_vmrun(int aflag, int next_eip_addend)
4123
{
4124
    target_ulong addr;
4125
    uint32_t event_inj;
4126
    uint32_t int_ctl;
4127

    
4128
    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4129

    
4130
    if (aflag == 2) {
4131
        addr = EAX;
4132
    } else {
4133
        addr = (uint32_t)EAX;
4134
    }
4135

    
4136
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4137

    
4138
    env->vm_vmcb = addr;
4139

    
4140
    /* save the current CPU state in the hsave page */
4141
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
4142
             env->gdt.base);
4143
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
4144
             env->gdt.limit);
4145

    
4146
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
4147
             env->idt.base);
4148
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
4149
             env->idt.limit);
4150

    
4151
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4152
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4153
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4154
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4155
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4156
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4157

    
4158
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4159
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
4160
             cpu_compute_eflags(env));
4161

    
4162
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4163
                 &env->segs[R_ES]);
4164
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4165
                 &env->segs[R_CS]);
4166
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4167
                 &env->segs[R_SS]);
4168
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4169
                 &env->segs[R_DS]);
4170

    
4171
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4172
             EIP + next_eip_addend);
4173
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4174
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4175

    
4176
    /* load the interception bitmaps so we do not need to access the
4177
       vmcb in svm mode */
4178
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4179
                                                      control.intercept));
4180
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
4181
                                       offsetof(struct vmcb,
4182
                                                control.intercept_cr_read));
4183
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
4184
                                        offsetof(struct vmcb,
4185
                                                 control.intercept_cr_write));
4186
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
4187
                                       offsetof(struct vmcb,
4188
                                                control.intercept_dr_read));
4189
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
4190
                                        offsetof(struct vmcb,
4191
                                                 control.intercept_dr_write));
4192
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
4193
                                         offsetof(struct vmcb,
4194
                                                  control.intercept_exceptions
4195
                                                  ));
4196

    
4197
    /* enable intercepts */
4198
    env->hflags |= HF_SVMI_MASK;
4199

    
4200
    env->tsc_offset = ldq_phys(env->vm_vmcb +
4201
                               offsetof(struct vmcb, control.tsc_offset));
4202

    
4203
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4204
                                                      save.gdtr.base));
4205
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4206
                                                      save.gdtr.limit));
4207

    
4208
    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4209
                                                      save.idtr.base));
4210
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4211
                                                      save.idtr.limit));
4212

    
4213
    /* clear exit_info_2 so we behave like the real hardware */
4214
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4215

    
4216
    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4217
                                                             save.cr0)));
4218
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4219
                                                             save.cr4)));
4220
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4221
                                                             save.cr3)));
4222
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4223
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4224
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4225
    if (int_ctl & V_INTR_MASKING_MASK) {
4226
        env->v_tpr = int_ctl & V_TPR_MASK;
4227
        env->hflags2 |= HF2_VINTR_MASK;
4228
        if (env->eflags & IF_MASK) {
4229
            env->hflags2 |= HF2_HIF_MASK;
4230
        }
4231
    }
4232

    
4233
    cpu_load_efer(env,
4234
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4235
    env->eflags = 0;
4236
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4237
                                                          save.rflags)),
4238
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4239
    CC_OP = CC_OP_EFLAGS;
4240

    
4241
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4242
                       env, R_ES);
4243
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4244
                       env, R_CS);
4245
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4246
                       env, R_SS);
4247
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4248
                       env, R_DS);
4249

    
4250
    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4251
    env->eip = EIP;
4252
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4253
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4254
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4255
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4256
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
4257
                                                           save.cpl)));
4258

    
4259
    /* FIXME: guest state consistency checks */
4260

    
4261
    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4262
    case TLB_CONTROL_DO_NOTHING:
4263
        break;
4264
    case TLB_CONTROL_FLUSH_ALL_ASID:
4265
        /* FIXME: this is not 100% correct but should work for now */
4266
        tlb_flush(env, 1);
4267
        break;
4268
    }
4269

    
4270
    env->hflags2 |= HF2_GIF_MASK;
4271

    
4272
    if (int_ctl & V_IRQ_MASK) {
4273
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4274
    }
4275

    
4276
    /* maybe we need to inject an event */
4277
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4278
                                                 control.event_inj));
4279
    if (event_inj & SVM_EVTINJ_VALID) {
4280
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4281
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4282
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
4283
                                          offsetof(struct vmcb,
4284
                                                   control.event_inj_err));
4285

    
4286
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4287
        /* FIXME: need to implement valid_err */
4288
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4289
        case SVM_EVTINJ_TYPE_INTR:
4290
            env->exception_index = vector;
4291
            env->error_code = event_inj_err;
4292
            env->exception_is_int = 0;
4293
            env->exception_next_eip = -1;
4294
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4295
            /* XXX: is it always correct? */
4296
            do_interrupt_x86_hardirq(env, vector, 1);
4297
            break;
4298
        case SVM_EVTINJ_TYPE_NMI:
4299
            env->exception_index = EXCP02_NMI;
4300
            env->error_code = event_inj_err;
4301
            env->exception_is_int = 0;
4302
            env->exception_next_eip = EIP;
4303
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
4304
            cpu_loop_exit(env);
4305
            break;
4306
        case SVM_EVTINJ_TYPE_EXEPT:
4307
            env->exception_index = vector;
4308
            env->error_code = event_inj_err;
4309
            env->exception_is_int = 0;
4310
            env->exception_next_eip = -1;
4311
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
4312
            cpu_loop_exit(env);
4313
            break;
4314
        case SVM_EVTINJ_TYPE_SOFT:
4315
            env->exception_index = vector;
4316
            env->error_code = event_inj_err;
4317
            env->exception_is_int = 1;
4318
            env->exception_next_eip = EIP;
4319
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
4320
            cpu_loop_exit(env);
4321
            break;
4322
        }
4323
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
4324
                      env->error_code);
4325
    }
4326
}
4327

    
4328
void helper_vmmcall(void)
4329
{
4330
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4331
    raise_exception(env, EXCP06_ILLOP);
4332
}
4333

    
4334
void helper_vmload(int aflag)
4335
{
4336
    target_ulong addr;
4337

    
4338
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4339

    
4340
    if (aflag == 2) {
4341
        addr = EAX;
4342
    } else {
4343
        addr = (uint32_t)EAX;
4344
    }
4345

    
4346
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
4347
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4348
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4349
                  env->segs[R_FS].base);
4350

    
4351
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4352
                       env, R_FS);
4353
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4354
                       env, R_GS);
4355
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4356
                 &env->tr);
4357
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4358
                 &env->ldt);
4359

    
4360
#ifdef TARGET_X86_64
4361
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
4362
                                                 save.kernel_gs_base));
4363
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4364
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4365
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4366
#endif
4367
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4368
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4369
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
4370
                                                 save.sysenter_esp));
4371
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
4372
                                                 save.sysenter_eip));
4373
}
4374

    
4375
void helper_vmsave(int aflag)
4376
{
4377
    target_ulong addr;
4378

    
4379
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
4380

    
4381
    if (aflag == 2) {
4382
        addr = EAX;
4383
    } else {
4384
        addr = (uint32_t)EAX;
4385
    }
4386

    
4387
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
4388
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4389
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4390
                  env->segs[R_FS].base);
4391

    
4392
    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
4393
                 &env->segs[R_FS]);
4394
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
4395
                 &env->segs[R_GS]);
4396
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
4397
                 &env->tr);
4398
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
4399
                 &env->ldt);
4400

    
4401
#ifdef TARGET_X86_64
4402
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
4403
             env->kernelgsbase);
4404
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4405
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4406
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4407
#endif
4408
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4409
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4410
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
4411
             env->sysenter_esp);
4412
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
4413
             env->sysenter_eip);
4414
}
4415

    
4416
void helper_stgi(void)
4417
{
4418
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
4419
    env->hflags2 |= HF2_GIF_MASK;
4420
}
4421

    
4422
void helper_clgi(void)
4423
{
4424
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
4425
    env->hflags2 &= ~HF2_GIF_MASK;
4426
}
4427

    
4428
void helper_skinit(void)
4429
{
4430
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
4431
    /* XXX: not implemented */
4432
    raise_exception(env, EXCP06_ILLOP);
4433
}
4434

    
4435
void helper_invlpga(int aflag)
4436
{
4437
    target_ulong addr;
4438

    
4439
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
4440

    
4441
    if (aflag == 2) {
4442
        addr = EAX;
4443
    } else {
4444
        addr = (uint32_t)EAX;
4445
    }
4446

    
4447
    /* XXX: could use the ASID to see if it is needed to do the
4448
       flush */
4449
    tlb_flush_page(env, addr);
4450
}
4451

    
4452
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4453
{
4454
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
4455
        return;
4456
    }
4457
    switch (type) {
4458
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4459
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
4460
            helper_vmexit(type, param);
4461
        }
4462
        break;
4463
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4464
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
4465
            helper_vmexit(type, param);
4466
        }
4467
        break;
4468
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
4469
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
4470
            helper_vmexit(type, param);
4471
        }
4472
        break;
4473
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
4474
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
4475
            helper_vmexit(type, param);
4476
        }
4477
        break;
4478
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
4479
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
4480
            helper_vmexit(type, param);
4481
        }
4482
        break;
4483
    case SVM_EXIT_MSR:
4484
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
4485
            /* FIXME: this should be read in at vmrun (faster this way?) */
4486
            uint64_t addr = ldq_phys(env->vm_vmcb +
4487
                                     offsetof(struct vmcb,
4488
                                              control.msrpm_base_pa));
4489
            uint32_t t0, t1;
4490

    
4491
            switch ((uint32_t)ECX) {
4492
            case 0 ... 0x1fff:
4493
                t0 = (ECX * 2) % 8;
4494
                t1 = (ECX * 2) / 8;
4495
                break;
4496
            case 0xc0000000 ... 0xc0001fff:
4497
                t0 = (8192 + ECX - 0xc0000000) * 2;
4498
                t1 = (t0 / 8);
4499
                t0 %= 8;
4500
                break;
4501
            case 0xc0010000 ... 0xc0011fff:
4502
                t0 = (16384 + ECX - 0xc0010000) * 2;
4503
                t1 = (t0 / 8);
4504
                t0 %= 8;
4505
                break;
4506
            default:
4507
                helper_vmexit(type, param);
4508
                t0 = 0;
4509
                t1 = 0;
4510
                break;
4511
            }
4512
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
4513
                helper_vmexit(type, param);
4514
            }
4515
        }
4516
        break;
4517
    default:
4518
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
4519
            helper_vmexit(type, param);
4520
        }
4521
        break;
4522
    }
4523
}
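
/*
 * Sketch of the MSR permission bitmap indexing used in the SVM_EXIT_MSR case
 * above: each MSR owns two consecutive bits (read intercept, then write
 * intercept) and the three architectural MSR ranges are packed back to back.
 * The helper name and the -1 "not covered" return are invented for this
 * illustration.
 */
#if 0
static int msrpm_bit_index(uint32_t msr)
{
    if (msr <= 0x1fff) {
        return msr * 2;
    } else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {
        return (8192 + msr - 0xc0000000) * 2;
    } else if (msr >= 0xc0010000 && msr <= 0xc0011fff) {
        return (16384 + msr - 0xc0010000) * 2;
    }
    return -1;  /* MSR not covered by the bitmap */
}
#endif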

    
4525
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
4526
                                   uint64_t param)
4527
{
4528
    CPUX86State *saved_env;
4529

    
4530
    saved_env = env;
4531
    env = env1;
4532
    helper_svm_check_intercept_param(type, param);
4533
    env = saved_env;
4534
}
4535

    
4536
void helper_svm_check_io(uint32_t port, uint32_t param,
4537
                         uint32_t next_eip_addend)
4538
{
4539
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
4540
        /* FIXME: this should be read in at vmrun (faster this way?) */
4541
        uint64_t addr = ldq_phys(env->vm_vmcb +
4542
                                 offsetof(struct vmcb, control.iopm_base_pa));
4543
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4544

    
4545
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
4546
            /* next EIP */
4547
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
4548
                     env->eip + next_eip_addend);
4549
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
4550
        }
4551
    }
4552
}
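
/*
 * Sketch of the I/O permission bitmap test done by helper_svm_check_io()
 * above: the bitmap holds one bit per port, and an access of 'size' bytes
 * starting at 'port' is intercepted if any of those bits is set.  'size'
 * corresponds to bits 6:4 of exit_info_1 (the param argument); the function
 * name is invented for this illustration.
 */
#if 0
static int iopm_access_intercepted(uint16_t iopm_word, uint32_t port,
                                   unsigned size)
{
    uint16_t mask = (1 << size) - 1;

    return (iopm_word >> (port & 7)) & mask;
}
#endif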

    
4554
/* Note: currently only 32 bits of exit_code are used */
4555
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4556
{
4557
    uint32_t int_ctl;
4558

    
4559
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
4560
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
4561
                  exit_code, exit_info_1,
4562
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
4563
                                                   control.exit_info_2)),
4564
                  EIP);
4565

    
4566
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
4567
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
4568
                 SVM_INTERRUPT_SHADOW_MASK);
4569
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4570
    } else {
4571
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4572
    }
4573

    
4574
    /* Save the VM state in the vmcb */
4575
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
4576
                 &env->segs[R_ES]);
4577
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4578
                 &env->segs[R_CS]);
4579
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4580
                 &env->segs[R_SS]);
4581
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4582
                 &env->segs[R_DS]);
4583

    
4584
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
4585
             env->gdt.base);
4586
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
4587
             env->gdt.limit);
4588

    
4589
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
4590
             env->idt.base);
4591
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
4592
             env->idt.limit);
4593

    
4594
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4595
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4596
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4597
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4598
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
4599

    
4600
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4601
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
4602
    int_ctl |= env->v_tpr & V_TPR_MASK;
4603
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
4604
        int_ctl |= V_IRQ_MASK;
4605
    }
4606
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
4607

    
4608
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
4609
             cpu_compute_eflags(env));
4610
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4611
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4612
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4613
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4614
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4615
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
4616
             env->hflags & HF_CPL_MASK);
4617

    
4618
    /* Reload the host state from vm_hsave */
4619
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4620
    env->hflags &= ~HF_SVMI_MASK;
4621
    env->intercept = 0;
4622
    env->intercept_exceptions = 0;
4623
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
4624
    env->tsc_offset = 0;
4625

    
4626
    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4627
                                                       save.gdtr.base));
4628
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
4629
                                                       save.gdtr.limit));
4630

    
4631
    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4632
                                                       save.idtr.base));
4633
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
4634
                                                       save.idtr.limit));
4635

    
4636
    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4637
                                                              save.cr0)) |
4638
                       CR0_PE_MASK);
4639
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4640
                                                              save.cr4)));
4641
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4642
                                                              save.cr3)));
4643
    /* we need to set the efer after the crs so the hidden flags get
4644
       set properly */
4645
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4646
                                                         save.efer)));
4647
    env->eflags = 0;
4648
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
4649
                                                           save.rflags)),
4650
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4651
    CC_OP = CC_OP_EFLAGS;
4652

    
4653
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
4654
                       env, R_ES);
4655
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
4656
                       env, R_CS);
4657
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
4658
                       env, R_SS);
4659
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
4660
                       env, R_DS);
4661

    
4662
    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4663
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4664
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4665

    
4666
    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4667
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
4668

    
4669
    /* other setups */
4670
    cpu_x86_set_cpl(env, 0);
4671
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
4672
             exit_code);
4673
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
4674
             exit_info_1);
4675

    
4676
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
4677
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4678
                                              control.event_inj)));
4679
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
4680
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
4681
                                              control.event_inj_err)));
4682
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
4683

    
4684
    env->hflags2 &= ~HF2_GIF_MASK;
4685
    /* FIXME: Resets the current ASID register to zero (host ASID). */
4686

    
4687
    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4688

    
4689
    /* Clears the TSC_OFFSET inside the processor. */
4690

    
4691
    /* If the host is in PAE mode, the processor reloads the host's PDPEs
4692
       from the page table indicated the host's CR3. If the PDPEs contain
4693
       illegal state, the processor causes a shutdown. */
4694

    
4695
    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4696
    env->cr[0] |= CR0_PE_MASK;
4697
    env->eflags &= ~VM_MASK;
4698

    
4699
    /* Disables all breakpoints in the host DR7 register. */
4700

    
4701
    /* Checks the reloaded host state for consistency. */
4702

    
4703
    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4704
       host's code segment or non-canonical (in the case of long mode), a
4705
       #GP fault is delivered inside the host. */
4706

    
4707
    /* remove any pending exception */
4708
    env->exception_index = -1;
4709
    env->error_code = 0;
4710
    env->old_exception = -1;
4711

    
4712
    cpu_loop_exit(env);
4713
}
4714

    
4715
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
    env = nenv;
    helper_vmexit(exit_code, exit_info_1);
}

#endif

    
4723
#define SHIFT 0
4724
#include "cc_helper_template.h"
4725
#undef SHIFT
4726

    
4727
#define SHIFT 1
4728
#include "cc_helper_template.h"
4729
#undef SHIFT
4730

    
4731
#define SHIFT 2
4732
#include "cc_helper_template.h"
4733
#undef SHIFT
4734

    
4735
#ifdef TARGET_X86_64
4736

    
4737
#define SHIFT 3
4738
#include "cc_helper_template.h"
4739
#undef SHIFT
4740

    
4741
#endif
4742

    
4743
#define SHIFT 0
4744
#include "shift_helper_template.h"
4745
#undef SHIFT
4746

    
4747
#define SHIFT 1
4748
#include "shift_helper_template.h"
4749
#undef SHIFT
4750

    
4751
#define SHIFT 2
4752
#include "shift_helper_template.h"
4753
#undef SHIFT
4754

    
4755
#ifdef TARGET_X86_64
4756
#define SHIFT 3
4757
#include "shift_helper_template.h"
4758
#undef SHIFT
4759
#endif
4760

    
4761
/* bit operations */
4762
target_ulong helper_bsf(target_ulong t0)
4763
{
4764
    int count;
4765
    target_ulong res;
4766

    
4767
    res = t0;
4768
    count = 0;
4769
    while ((res & 1) == 0) {
4770
        count++;
4771
        res >>= 1;
4772
    }
4773
    return count;
4774
}
4775

    
4776
target_ulong helper_lzcnt(target_ulong t0, int wordsize)
4777
{
4778
    int count;
4779
    target_ulong res, mask;
4780

    
4781
    if (wordsize > 0 && t0 == 0) {
4782
        return wordsize;
4783
    }
4784
    res = t0;
4785
    count = TARGET_LONG_BITS - 1;
4786
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
4787
    while ((res & mask) == 0) {
4788
        count--;
4789
        res <<= 1;
4790
    }
4791
    if (wordsize > 0) {
4792
        return wordsize - 1 - count;
4793
    }
4794
    return count;
4795
}
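
/*
 * Illustrative expectations for helper_lzcnt() above, assuming a 64-bit
 * target_ulong: with wordsize == 0 it behaves like BSR and returns the index
 * of the highest set bit; with a non-zero wordsize it returns the LZCNT
 * result for that operand width.
 */
#if 0
static void lzcnt_examples(void)
{
    /* helper_bsr(0x80) == 7 and helper_bsr(1) == 0 */
    /* helper_lzcnt(1, 32) == 31 and helper_lzcnt(0, 16) == 16 */
}
#endif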

    
4797
target_ulong helper_bsr(target_ulong t0)
4798
{
4799
    return helper_lzcnt(t0, 0);
4800
}
4801

    
4802
static int compute_all_eflags(void)
4803
{
4804
    return CC_SRC;
4805
}
4806

    
4807
static int compute_c_eflags(void)
4808
{
4809
    return CC_SRC & CC_C;
4810
}
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_all_eflags();

    case CC_OP_MULB:
        return compute_all_mulb();
    case CC_OP_MULW:
        return compute_all_mulw();
    case CC_OP_MULL:
        return compute_all_mull();

    case CC_OP_ADDB:
        return compute_all_addb();
    case CC_OP_ADDW:
        return compute_all_addw();
    case CC_OP_ADDL:
        return compute_all_addl();

    case CC_OP_ADCB:
        return compute_all_adcb();
    case CC_OP_ADCW:
        return compute_all_adcw();
    case CC_OP_ADCL:
        return compute_all_adcl();

    case CC_OP_SUBB:
        return compute_all_subb();
    case CC_OP_SUBW:
        return compute_all_subw();
    case CC_OP_SUBL:
        return compute_all_subl();

    case CC_OP_SBBB:
        return compute_all_sbbb();
    case CC_OP_SBBW:
        return compute_all_sbbw();
    case CC_OP_SBBL:
        return compute_all_sbbl();

    case CC_OP_LOGICB:
        return compute_all_logicb();
    case CC_OP_LOGICW:
        return compute_all_logicw();
    case CC_OP_LOGICL:
        return compute_all_logicl();

    case CC_OP_INCB:
        return compute_all_incb();
    case CC_OP_INCW:
        return compute_all_incw();
    case CC_OP_INCL:
        return compute_all_incl();

    case CC_OP_DECB:
        return compute_all_decb();
    case CC_OP_DECW:
        return compute_all_decw();
    case CC_OP_DECL:
        return compute_all_decl();

    case CC_OP_SHLB:
        return compute_all_shlb();
    case CC_OP_SHLW:
        return compute_all_shlw();
    case CC_OP_SHLL:
        return compute_all_shll();

    case CC_OP_SARB:
        return compute_all_sarb();
    case CC_OP_SARW:
        return compute_all_sarw();
    case CC_OP_SARL:
        return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_all_mulq();

    case CC_OP_ADDQ:
        return compute_all_addq();

    case CC_OP_ADCQ:
        return compute_all_adcq();

    case CC_OP_SUBQ:
        return compute_all_subq();

    case CC_OP_SBBQ:
        return compute_all_sbbq();

    case CC_OP_LOGICQ:
        return compute_all_logicq();

    case CC_OP_INCQ:
        return compute_all_incq();

    case CC_OP_DECQ:
        return compute_all_decq();

    case CC_OP_SHLQ:
        return compute_all_shlq();

    case CC_OP_SARQ:
        return compute_all_sarq();
#endif
    }
}

uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
{
    CPUX86State *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}
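
/* Carry-only counterpart of helper_cc_compute_all().  Several cases below
   intentionally share one helper because CF does not depend on the operand
   width for those operations: MUL sets CF when the high half kept in
   CC_SRC is non-zero, INC/DEC leave CF untouched and simply return the
   value saved in CC_SRC, and SAR's CF is the last bit shifted out, so
   compute_c_mull(), compute_c_incl() and compute_c_sarl() serve all
   widths. */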
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */
        return 0;

    case CC_OP_EFLAGS:
        return compute_c_eflags();

    case CC_OP_MULB:
        return compute_c_mull();
    case CC_OP_MULW:
        return compute_c_mull();
    case CC_OP_MULL:
        return compute_c_mull();

    case CC_OP_ADDB:
        return compute_c_addb();
    case CC_OP_ADDW:
        return compute_c_addw();
    case CC_OP_ADDL:
        return compute_c_addl();

    case CC_OP_ADCB:
        return compute_c_adcb();
    case CC_OP_ADCW:
        return compute_c_adcw();
    case CC_OP_ADCL:
        return compute_c_adcl();

    case CC_OP_SUBB:
        return compute_c_subb();
    case CC_OP_SUBW:
        return compute_c_subw();
    case CC_OP_SUBL:
        return compute_c_subl();

    case CC_OP_SBBB:
        return compute_c_sbbb();
    case CC_OP_SBBW:
        return compute_c_sbbw();
    case CC_OP_SBBL:
        return compute_c_sbbl();

    case CC_OP_LOGICB:
        return compute_c_logicb();
    case CC_OP_LOGICW:
        return compute_c_logicw();
    case CC_OP_LOGICL:
        return compute_c_logicl();

    case CC_OP_INCB:
        return compute_c_incl();
    case CC_OP_INCW:
        return compute_c_incl();
    case CC_OP_INCL:
        return compute_c_incl();

    case CC_OP_DECB:
        return compute_c_incl();
    case CC_OP_DECW:
        return compute_c_incl();
    case CC_OP_DECL:
        return compute_c_incl();

    case CC_OP_SHLB:
        return compute_c_shlb();
    case CC_OP_SHLW:
        return compute_c_shlw();
    case CC_OP_SHLL:
        return compute_c_shll();

    case CC_OP_SARB:
        return compute_c_sarl();
    case CC_OP_SARW:
        return compute_c_sarl();
    case CC_OP_SARL:
        return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ:
        return compute_c_mull();

    case CC_OP_ADDQ:
        return compute_c_addq();

    case CC_OP_ADCQ:
        return compute_c_adcq();

    case CC_OP_SUBQ:
        return compute_c_subq();

    case CC_OP_SBBQ:
        return compute_c_sbbq();

    case CC_OP_LOGICQ:
        return compute_c_logicq();

    case CC_OP_INCQ:
        return compute_c_incl();

    case CC_OP_DECQ:
        return compute_c_incl();

    case CC_OP_SHLQ:
        return compute_c_shlq();

    case CC_OP_SARQ:
        return compute_c_sarl();
#endif
    }
}