/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#include "kvm_x86.h"

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    if (kvm_enabled()) {
        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
        return;
    }

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8)&0xf) >= 6
        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank*4] = ~(uint64_t)0;
    }
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif