Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ b5f1aa64

History | View | Annotate | Download (40.6 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30
#ifndef CONFIG_USER_ONLY
31
#include "sysemu.h"
32
#include "monitor.h"
33
#endif
34

    
35
//#define DEBUG_MMU
36

    
37
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but excluding) the breakpoints field;
       fields from breakpoints onward deliberately survive a reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    /* global interrupt flag (SVM) is set at reset */
    env->hflags2 |= HF2_GIF_MASK;

    /* architectural power-on CR0 value (CD | NW | ET) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* real-mode reset vector: CS.base 0xffff0000 + EIP 0xfff0
       makes the first fetch come from 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    /* only the reserved always-one bit is set */
    env->eflags = 0x2;

    /* FPU init */
    /* fptags[i] = 1 marks the register empty (cpu_dump_state inverts
       the flags to build the FTW word) */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;       /* x87 power-on control word */

    env->mxcsr = 0x1f80;     /* SSE default: all exceptions masked */

    /* architectural default PAT value */
    env->pat = 0x0007040600070406ULL;

    /* debug registers: clear, then restore the bits that read as 1 */
    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
110

    
111
/* Release the heap-allocated CPU state structure. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
115

    
116
/* Decode the display family and model from env->cpuid_version.
 * The model combines the extended-model bits (19:16, shifted into the
 * high nibble) with the base model bits (7:4).  Both output pointers
 * must be non-NULL or the call is a no-op. */
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int ver = env->cpuid_version;

    if (!family || !model) {
        return;
    }

    *family = (ver >> 8) & 0x0f;
    *model = ((ver >> 12) & 0xf0) + ((ver >> 4) & 0x0f);
}
127

    
128
/* Broadcast MCA signal for processor version 06H_EH and above */
129
int cpu_x86_support_mca_broadcast(CPUState *env)
130
{
131
    int family = 0;
132
    int model = 0;
133

    
134
    cpu_x86_version(env, &family, &model);
135
    if ((family == 6 && model >= 14) || family > 6) {
136
        return 1;
137
    }
138

    
139
    return 0;
140
}
141

    
142
/***********************************************************/
143
/* x86 debug */
144

    
145
/* Human-readable names for the lazy condition-code operations; the
   array is indexed directly by env->cc_op (see cpu_dump_state), so the
   order here must match the CC_OP_* enumeration exactly. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
199

    
200
/* Print one segment-cache entry (selector, base, limit, attribute
   flags) as a single line to f.  In protected mode, a present
   descriptor additionally gets its DPL and a decoded type: code/data
   segments show access bits, system segments show a type name. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code: bases can exceed 32 bits */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* no decoding in real mode or for a non-present descriptor */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code or data segment */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system segment: type names indexed by the 4-bit descriptor
           type, one table per legacy/long mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
254

    
255
#define DUMP_CODE_BYTES_TOTAL    50
256
#define DUMP_CODE_BYTES_BACKWARD 20
257

    
258
/* Dump the full architectural CPU state to f: general registers,
   EFLAGS, segment caches, descriptor tables, control and debug
   registers, and — depending on the bits set in flags — the lazy
   condition codes (X86_DUMP_CCOP), the FPU/SSE state (X86_DUMP_FPU)
   and a hexdump of code around EIP (CPU_DUMP_CODE). */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* make sure env reflects the current state when running under KVM */
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit register dump (R8..R15 included) */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit register dump */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* segment registers, then LDT and TR */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit table bases, CR2/CR3 and DRs */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* lazy condition codes: show symbolic cc_op name when in range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* build FTW: bit i set when x87 register i is non-empty */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* two FP registers per output line */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit code, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* hexdump DUMP_CODE_BYTES_TOTAL bytes around EIP, starting up
           to DUMP_CODE_BYTES_BACKWARD bytes before it; the byte at EIP
           is bracketed as <xx> */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unreadable byte */
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
456

    
457
/***********************************************************/
458
/* x86 mmu */
459
/* XXX: add PGE support */
460

    
461
/* Drive the A20 gate line.  a20_state is treated as a boolean; when
 * the gate state actually changes, translated code is unlinked and the
 * whole TLB flushed before bit 20 of a20_mask is updated. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int cur_state = (env->a20_mask >> 20) & 1;

    a20_state = (a20_state != 0);
    if (a20_state == cur_state) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
478

    
479
/* Install a new CR0 value.  Flushes the TLB when any of PG/WP/PE
   change, handles the long-mode activate/deactivate transitions via
   EFER.LMA, forces the ET bit on, and rederives the PE/ADDSEG and
   FPU-related (MP/EM/TS) bits of hflags from the new value. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to paging/protection bits invalidates all mappings */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        /* PAE is architecturally required for long mode; without it,
           silently refuse the whole CR0 update */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET always reads as 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* CR0.MP/EM/TS (bits 1..3) map onto the adjacent hflags bits,
       hence the single shift */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
519

    
520
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
521
   the PDPT */
522
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
523
{
524
    env->cr[3] = new_cr3;
525
    if (env->cr[0] & CR0_PG_MASK) {
526
#if defined(DEBUG_MMU)
527
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
528
#endif
529
        tlb_flush(env, 0);
530
    }
531
}
532

    
533
/* Install a new CR4 value.  A change in any paging-related bit
 * (PGE/PAE/PSE) flushes the whole TLB; OSFXSR is forced off when the
 * CPU model lacks SSE, and mirrored into hflags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) {
        tlb_flush(env, 1);
    }

    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    } else {
        env->hflags &= ~HF_OSFXSR_MASK;
    }

    env->cr[4] = new_cr4;
}
552

    
553
#if defined(CONFIG_USER_ONLY)
554

    
555
/* User-mode-only emulation: there is no MMU to consult, so every
 * fault is reported as a user-level page fault (#PF, returns 1 so the
 * caller raises the exception). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
566

    
567
#else
568

    
569
/* XXX: This value should match the one returned by CPUID
570
 * and in exec.c */
571
# if defined(TARGET_X86_64)
572
# define PHYS_ADDR_MASK 0xfffffff000LL
573
# else
574
# define PHYS_ADDR_MASK 0xffffff000LL
575
# endif
576

    
577
/* Walk the page tables for a guest access to addr and install the
   resulting translation in the TLB, or record a fault.

   is_write1: 0 = read, 1 = write, 2 = instruction fetch (only the low
   bit is kept as the write flag for error codes).
   mmu_idx selects user vs kernel privilege.

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* is_write1 == 2 means instruction fetch; keep only the write bit */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity map with full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address -> #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is off is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* invert NX so that ANDing the levels below accumulates
               permissions uniformly (USER/RW permissive-when-set,
               inverted NX likewise); re-inverted before the check */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries carry no access bits */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* start fully permissive (NX bit inverted, see above) */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: ptep NX bit now means "no execute" */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honor RW only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* non-PAE: classic two-level walk with 32-bit entries */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* assemble the #PF error code: W/U bits, and I/D for an
       instruction fetch when NX paging is active */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
871

    
872
/* Walk the guest page tables for 'addr' without setting accessed/dirty
 * bits and without raising faults.  Returns the guest-physical address,
 * or -1 if the address is not mapped.  Debug/monitor use only. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t phys;
    uint32_t offset_in_page;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* bits 63..48 must be a sign extension of bit 47 */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) +
                          (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }

            pdpe_addr = ((pml4e & ~0xfff) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT selected by bits 31..30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page: the PDE itself is the leaf entry */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: linear address is the physical address */
            pte = addr;
            page_size = 4096;
        } else {
            /* classic 32-bit two-level walk: page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
                env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK)) {
                return -1;
            }
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                    env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK)) {
                    return -1;
                }
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    offset_in_page = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    phys = (pte & TARGET_PAGE_MASK) + offset_in_page;
    return phys;
}
965

    
966
/* Mirror debug-register slot 'index' (configured via DR7) into QEMU's
 * generic breakpoint/watchpoint machinery.  On insertion failure the
 * cached breakpoint pointer is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int wp_flags;
    int err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0: /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1: /* data write */
        wp_flags = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3: /* data read/write */
        wp_flags = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    wp_flags, &env->cpu_watchpoint[index]);
        break;
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
993

    
994
/* Undo hw_breakpoint_insert() for slot 'index'.  A no-op when no
 * breakpoint/watchpoint is cached for the slot. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (env->cpu_breakpoint[index] == NULL) {
        return;
    }
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0: /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case 1: /* data write */
    case 3: /* data read/write */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1012

    
1013
/* Recompute the DR6 breakpoint-hit bits for all four debug slots.
 * Returns non-zero when an *enabled* breakpoint or watchpoint was hit;
 * DR6 is written back on a hit or when 'force_dr6_update' is set. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int slot;

    for (slot = 0; slot < 4; slot++) {
        int type = hw_breakpoint_type(env->dr[7], slot);
        int hit;

        if (type == 0) {
            /* instruction breakpoint: compare against the current EIP */
            hit = (env->dr[slot] == env->eip);
        } else {
            /* data watchpoint: hit flag is set by the memory subsystem */
            hit = (type & 1) && env->cpu_watchpoint[slot] &&
                  (env->cpu_watchpoint[slot]->flags & BP_WATCHPOINT_HIT);
        }
        if (hit) {
            new_dr6 |= 1 << slot;
            if (hw_breakpoint_enabled(env->dr[7], slot)) {
                hit_enabled = 1;
            }
        }
    }
    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1034

    
1035
static CPUDebugExcpHandler *prev_debug_excp_handler;
1036

    
1037
void raise_exception_env(int exception_index, CPUState *env);
1038

    
1039
/* Debug exception hook installed via cpu_set_debug_excp_handler().
 * Decides whether the stop belongs to the guest (BP_CPU) and turns it
 * into a #DB exception; otherwise resumes, and always chains to the
 * previously installed handler. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0)) {
                raise_exception_env(EXCP01_DB, env);
            } else {
                /* watchpoint was not an enabled guest one: keep running */
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}
1064

    
1065
/* Arguments for do_inject_x86_mce(), marshalled through run_on_cpu()
 * so the injection executes in the context of the target VCPU. */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor used for error reporting */
    CPUState *env;       /* target VCPU */
    int bank;            /* MCE bank to inject into */
    uint64_t status;     /* MCi_STATUS value to inject */
    uint64_t mcg_status; /* MCG_STATUS value to inject */
    uint64_t addr;       /* MCi_ADDR value */
    uint64_t misc;       /* MCi_MISC value */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1075

    
1076
/* Inject a machine-check event into params->env.  Runs on the target
 * VCPU's thread (scheduled via run_on_cpu()).  Implements the MCA
 * delivery rules: overflow flagging, triple fault when an uncorrected
 * error arrives while a previous MCE is in progress, and suppression
 * when reporting is disabled through MCG_CTL / MCi_CTL. */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    /* each bank occupies 4 MSR slots: CTL, STATUS, ADDR, MISC */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        /* a UC error while MCIP is set, or with CR4.MCE clear, is fatal:
         * real hardware shuts down, we request a system reset */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* bank already holds a valid event: set the overflow bit */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: record it unless an uncorrected event is
         * already latched in the bank */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* bank holds an uncorrected event: only flag the overflow */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1148

    
1149
/*
 * Inject a machine-check event into VCPU 'cenv' (monitor "mce" command).
 * The injection itself is performed on the target VCPU's thread via
 * run_on_cpu().  With MCE_INJECT_BROADCAST, a fatal UC event is also
 * broadcast to every other VCPU, mirroring real-hardware MCA broadcast.
 *
 * Validates mcg_cap support, the bank number, the VAL bit in 'status',
 * and broadcast capability before injecting; prints to 'mon' and returns
 * without side effects on any validation failure.
 */
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff; /* low byte = bank count */
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    /* negative 'bank' converts to a huge unsigned value and is rejected */
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            /* Fix: schedule on the *target* CPU ('env'), not the
             * originating one; do_inject_x86_mce() must run on (and
             * synchronize the state of) the VCPU it injects into. */
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}
1200
#endif /* !CONFIG_USER_ONLY */
1201

    
1202
/* Enable the machine-check architecture for CPU models that advertise
 * it: family >= 6 with both the CPUID MCE and MCA feature bits set. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;
    int family = (cenv->cpuid_version >> 8) & 0xf;

    if (family < 6 ||
        (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) !=
            (CPUID_MCE | CPUID_MCA)) {
        return;
    }
    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    /* all-ones MCi_CTL means error reporting enabled for every bank */
    for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
        cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}
1216

    
1217
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1218
                            target_ulong *base, unsigned int *limit,
1219
                            unsigned int *flags)
1220
{
1221
    SegmentCache *dt;
1222
    target_ulong ptr;
1223
    uint32_t e1, e2;
1224
    int index;
1225

    
1226
    if (selector & 0x4)
1227
        dt = &env->ldt;
1228
    else
1229
        dt = &env->gdt;
1230
    index = selector & ~7;
1231
    ptr = dt->base + index;
1232
    if ((index + 7) > dt->limit
1233
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1234
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1235
        return 0;
1236

    
1237
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1238
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1239
    if (e2 & DESC_G_MASK)
1240
        *limit = (*limit << 12) | 0xfff;
1241
    *flags = e2;
1242

    
1243
    return 1;
1244
}
1245

    
1246
CPUX86State *cpu_x86_init(const char *cpu_model)
1247
{
1248
    CPUX86State *env;
1249
    static int inited;
1250

    
1251
    env = qemu_mallocz(sizeof(CPUX86State));
1252
    cpu_exec_init(env);
1253
    env->cpu_model_str = cpu_model;
1254

    
1255
    /* init various static tables */
1256
    if (!inited) {
1257
        inited = 1;
1258
        optimize_flags_init();
1259
#ifndef CONFIG_USER_ONLY
1260
        prev_debug_excp_handler =
1261
            cpu_set_debug_excp_handler(breakpoint_handler);
1262
#endif
1263
    }
1264
    if (cpu_x86_register(env, cpu_model) < 0) {
1265
        cpu_x86_close(env);
1266
        return NULL;
1267
    }
1268
    mce_init(env);
1269

    
1270
    qemu_init_vcpu(env);
1271

    
1272
    return env;
1273
}
1274

    
1275
#if !defined(CONFIG_USER_ONLY)
1276
/* INIT IPI handler: reset the CPU while preserving any pending SIPI
 * request and the PAT MSR, then reset the local APIC.  Non-BSP CPUs
 * remain halted until they receive a SIPI. */
void do_cpu_init(CPUState *env)
{
    int pending_sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t saved_pat = env->pat;

    cpu_reset(env);
    env->interrupt_request = pending_sipi;
    env->pat = saved_pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}
1287

    
1288
/* SIPI handler: forward the startup IPI to the local APIC, which will
 * unhalt the CPU at the requested start vector. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
1292
#else
1293
/* User-mode emulation: INIT/SIPI have no meaning, provide empty stubs. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
1299
#endif