Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ d5bfda33

History | View | Annotate | Download (41 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30
#include "kvm_x86.h"
31
#ifndef CONFIG_USER_ONLY
32
#include "sysemu.h"
33
#include "monitor.h"
34
#endif
35

    
36
//#define DEBUG_MMU
37

    
38
/* NOTE: must be called outside the CPU execute loop */
39
void cpu_reset(CPUX86State *env)
40
{
41
    int i;
42

    
43
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
44
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
45
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
46
    }
47

    
48
    memset(env, 0, offsetof(CPUX86State, breakpoints));
49

    
50
    tlb_flush(env, 1);
51

    
52
    env->old_exception = -1;
53

    
54
    /* init to reset state */
55

    
56
#ifdef CONFIG_SOFTMMU
57
    env->hflags |= HF_SOFTMMU_MASK;
58
#endif
59
    env->hflags2 |= HF2_GIF_MASK;
60

    
61
    cpu_x86_update_cr0(env, 0x60000010);
62
    env->a20_mask = ~0x0;
63
    env->smbase = 0x30000;
64

    
65
    env->idt.limit = 0xffff;
66
    env->gdt.limit = 0xffff;
67
    env->ldt.limit = 0xffff;
68
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
69
    env->tr.limit = 0xffff;
70
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
71

    
72
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
73
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
74
                           DESC_R_MASK | DESC_A_MASK);
75
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
76
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
77
                           DESC_A_MASK);
78
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
79
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
80
                           DESC_A_MASK);
81
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
82
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
83
                           DESC_A_MASK);
84
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
85
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
86
                           DESC_A_MASK);
87
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
88
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
89
                           DESC_A_MASK);
90

    
91
    env->eip = 0xfff0;
92
    env->regs[R_EDX] = env->cpuid_version;
93

    
94
    env->eflags = 0x2;
95

    
96
    /* FPU init */
97
    for(i = 0;i < 8; i++)
98
        env->fptags[i] = 1;
99
    env->fpuc = 0x37f;
100

    
101
    env->mxcsr = 0x1f80;
102

    
103
    memset(env->dr, 0, sizeof(env->dr));
104
    env->dr[6] = DR6_FIXED_1;
105
    env->dr[7] = DR7_FIXED_1;
106
    cpu_breakpoint_remove_all(env, BP_CPU);
107
    cpu_watchpoint_remove_all(env, BP_CPU);
108
}
109

    
110
/* Release the CPU state structure for this vCPU.  The pointer must not
   be used afterwards. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
114

    
115
/* Decode the display family and model out of env->cpuid_version.
 * Both output pointers must be non-NULL; otherwise nothing is written. */
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int ver = env->cpuid_version;

    if (!family || !model) {
        return;
    }

    /* family: bits 11:8; model: extended model (19:16) * 16 + model (7:4) */
    *family = (ver >> 8) & 0x0f;
    *model = ((ver >> 12) & 0xf0) + ((ver >> 4) & 0x0f);
}
126

    
127
/* Broadcast MCA signal for processor version 06H_EH and above */
128
int cpu_x86_support_mca_broadcast(CPUState *env)
129
{
130
    int family = 0;
131
    int model = 0;
132

    
133
    cpu_x86_version(env, &family, &model);
134
    if ((family == 6 && model >= 14) || family > 6) {
135
        return 1;
136
    }
137

    
138
    return 0;
139
}
140

    
141
/***********************************************************/
142
/* x86 debug */
143

    
144
/* Printable names for the lazy condition-code operations, indexed by
   env->cc_op.  NOTE: the order must exactly match the CC_OP_* enum
   (declared elsewhere) — groups of B/W/L/Q variants per operation. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
198

    
199
/* Print one cached segment descriptor (selector, base, limit, flags and a
   decoded description) to f via cpu_fprintf, prefixed with 'name'.
   In 64-bit code the base is printed full-width, otherwise truncated to
   32 bits.  The decoded part is skipped in real mode or when the
   descriptor is not present. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* No attribute decoding in real mode or for non-present descriptors. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* Code/data (non-system) descriptor. */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* System descriptor: name the type, with separate tables for
           legacy/compatibility mode vs long mode. */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
253

    
254
/* Window of guest code bytes dumped around EIP by CPU_DUMP_CODE. */
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

/* Dump the full architectural CPU state (GPRs, flags, segments, control
   and debug registers, and — depending on 'flags' — lazy condition
   codes, FPU/SSE state and a window of code bytes) to f. */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* Make sure env reflects the current state (relevant under KVM). */
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full-width register file. */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit (or 16-bit) mode: truncate registers to 32 bits. */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* Segment registers, then LDT and TR. */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* Long mode: descriptor-table bases and CR2/CR3/DRx are 64-bit. */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* Lazy condition-code state; out-of-range cc_op printed numerically. */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* Reconstruct the packed FTW byte: bit i set => stack slot i valid. */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* Split the 80-bit long double into mantissa and sign/exponent
               words for printing. */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            /* Two registers per output line. */
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* XMM8-15 only exist with a 64-bit code segment. */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* Dump code bytes around CS:EIP; start up to
           DUMP_CODE_BYTES_BACKWARD before EIP (clamped at 0) and mark the
           byte at EIP with <..>.  Unreadable bytes print as "??". */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
461

    
462
/***********************************************************/
463
/* x86 mmu */
464
/* XXX: add PGE support */
465

    
466
/* Update the A20 gate.  No-op when the requested state matches the
   current one; otherwise the TLB is fully flushed since every mapping
   may change. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state == ((env->a20_mask >> 20) & 1)) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", a20_state);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = ~(1 << 20) | (a20_state << 20);
}
483

    
484
/* Install a new CR0 value, handling TLB invalidation, long-mode
   entry/exit and the derived hflags bits (PE, ADDSEG, MP/EM/TS). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* A change to PG/WP/PE invalidates all cached translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is always forced to 1. */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    /* CR0's MP/EM/TS bits are copied into hflags with a single shift. */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
524

    
525
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
526
   the PDPT */
527
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
528
{
529
    env->cr[3] = new_cr3;
530
    if (env->cr[0] & CR0_PG_MASK) {
531
#if defined(DEBUG_MMU)
532
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
533
#endif
534
        tlb_flush(env, 0);
535
    }
536
}
537

    
538
/* Install a new CR4 value: flush the TLB when paging-related bits
   change, drop OSFXSR if the CPU lacks SSE, and mirror OSFXSR into
   hflags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* Log the value being installed (previously this printed the stale
       env->cr[4], unlike the CR0/CR3 debug messages). */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    /* A change to PGE/PAE/PSE invalidates all cached translations. */
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set without SSE support. */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    } else {
        env->hflags &= ~HF_OSFXSR_MASK;
    }

    env->cr[4] = new_cr4;
}
557

    
558
#if defined(CONFIG_USER_ONLY)
559

    
560
/* user mode only emulation: every fault becomes a user-level page
   fault; record the address in CR2 and let the caller raise #PF. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
571

    
572
#else
573

    
574
/* XXX: This value should match the one returned by CPUID
575
 * and in exec.c */
576
# if defined(TARGET_X86_64)
577
# define PHYS_ADDR_MASK 0xfffffff000LL
578
# else
579
# define PHYS_ADDR_MASK 0xffffff000LL
580
# endif
581

    
582
/* Walk the guest page tables for 'addr' and install the translation in
   the TLB on success.  is_write1: 0 = read, 1 = write, 2 = instruction
   fetch (only the low bit is used for the dirty/RW checks).  Handles
   all three paging modes: no paging, PAE/long mode, and legacy 32-bit.
   Sets accessed/dirty bits in guest entries as it goes.

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* Paging disabled: identity mapping with full access. */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* Non-canonical address: #GP, not #PF. */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            /* Level 4: PML4 entry. */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault. */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* ptep accumulates the combined protections; NX is tracked
               inverted (XOR) so that AND-combining gives "NX if any
               level has NX". */
            ptep = pml4e ^ PG_NX_MASK;
            /* Level 3: PDPT entry. */
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* Legacy PAE: 4-entry PDPT addressed by CR3 bits 31:5.  The
               PDPTE carries no access bits, so start fully permissive. */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        /* Level 2: page directory entry. */
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* Un-invert NX for the final permission checks. */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* Supervisor writes honor RW only when CR0.WP is set. */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* Level 1: page table entry. */
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* Legacy 32-bit two-level paging. */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* Build the #PF error code from the access type and mode. */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
876

    
877
/* Translate a guest-virtual address to a guest-physical address by walking
 * the page tables, for debugger/gdbstub use.  Unlike the MMU fault path it
 * performs no permission checks, sets no accessed/dirty bits and raises no
 * exceptions; it returns -1 if the address does not map to a present page. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* Long mode: 4-level walk PML4E -> PDPE -> PDE -> PTE. */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at a 32-byte-aligned, 4-entry PDPT;
               bits 31:30 of the address select the entry. */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* Paging disabled: linear address is the physical address. */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
970

    
971
/* Register the hardware debug register DR0..DR3 slot 'index' with the
 * generic breakpoint/watchpoint machinery, according to its type bits in
 * DR7.  On failure the cached reference is cleared. */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int bp_type = hw_breakpoint_type(env->dr[7], index);
    int err = 0;

    switch (bp_type) {
    case 0:
        /* Execution breakpoint. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1: /* data write */
    case 3: /* data read/write */
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    bp_type == 1 ? (BP_CPU | BP_MEM_WRITE)
                                                 : (BP_CPU | BP_MEM_ACCESS),
                                    &env->cpu_watchpoint[index]);
        break;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
998

    
999
/* Undo hw_breakpoint_insert() for DR0..DR3 slot 'index'.  A NULL cached
 * reference means nothing was registered, so there is nothing to remove. */
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (env->cpu_breakpoint[index] == NULL) {
        return;
    }

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* Execution breakpoint. */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case 1: /* data write */
    case 3: /* data read/write */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
1017

    
1018
/* Scan DR0..DR3 for debug-register hits, accumulating the B0..B3 bits of
 * DR6.  Returns non-zero if an *enabled* breakpoint/watchpoint matched;
 * DR6 is written back when a hit occurred or when forced by the caller. */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int i;

    for (i = 0; i < 4; i++) {
        int bp_type = hw_breakpoint_type(env->dr[7], i);
        int matched;

        if (bp_type == 0) {
            /* Execution breakpoint: compare against the current EIP. */
            matched = (env->dr[i] == env->eip);
        } else {
            /* Data watchpoint (types 1 and 3): the watchpoint machinery
               flags the hit on the registered watchpoint. */
            matched = (bp_type & 1) && env->cpu_watchpoint[i] &&
                      (env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT);
        }
        if (matched) {
            new_dr6 |= 1 << i;
            if (hw_breakpoint_enabled(env->dr[7], i)) {
                hit_enabled = 1;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1039

    
1040
static CPUDebugExcpHandler *prev_debug_excp_handler;
1041

    
1042
void raise_exception_env(int exception_index, CPUState *env);
1043

    
1044
/* Debug-exception hook: decide whether a generic breakpoint/watchpoint hit
 * corresponds to an x86 debug-register hit and, if so, raise #DB.  Chains
 * to the previously installed handler afterwards. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit != NULL) {
        /* A data watchpoint fired. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0)) {
                raise_exception_env(EXCP01_DB, env);
            } else {
                /* Not an enabled DR hit: resume without an exception. */
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        /* Find the breakpoint registered at the current EIP. */
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc != env->eip) {
                continue;
            }
            if (bp->flags & BP_CPU) {
                check_hw_breakpoints(env, 1);
                raise_exception_env(EXCP01_DB, env);
            }
            break;
        }
    }
    if (prev_debug_excp_handler) {
        prev_debug_excp_handler(env);
    }
}
1069

    
1070
/* Arguments for do_inject_x86_mce(), passed through run_on_cpu(). */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor used for error reporting */
    CPUState *env;       /* CPU receiving the machine-check */
    int bank;            /* target MCE bank index */
    uint64_t status;     /* value to store into MCi_STATUS */
    uint64_t mcg_status; /* value to store into MCG_STATUS */
    uint64_t addr;       /* value to store into MCi_ADDR */
    uint64_t misc;       /* value to store into MCi_MISC */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
1080

    
1081
/* Perform the actual MCE injection on the target CPU (scheduled through
 * run_on_cpu()).  'data' is a MCEInjectionParams.  Each MCE bank occupies
 * four consecutive entries of env->mce_banks: CTL, STATUS, ADDR, MISC. */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        /* An unhandled previous MCE, or CR4.MCE clear, escalates to a
           machine reset (triple fault). */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        /* Overwriting a still-valid bank sets the overflow bit. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: record it without raising an interrupt, unless
           the bank already holds an uncorrected error. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank holds an uncorrected error: only flag the overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1153

    
1154
/* Inject a machine-check event into CPU 'cenv' (monitor 'mce' command).
 * Validates the MCE capability, bank number, status and broadcast support
 * before handing off to KVM or to do_inject_x86_mce() on the target CPU.
 * With MCE_INJECT_BROADCAST, every other CPU additionally receives a
 * fixed MCIP|RIPV event in bank 1. */
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;
    int flag = 0;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    if (kvm_enabled()) {
        if (flags & MCE_INJECT_BROADCAST) {
            flag |= MCE_BROADCAST;
        }

        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc, flag);
    } else {
        run_on_cpu(cenv, do_inject_x86_mce, &params);
        if (flags & MCE_INJECT_BROADCAST) {
            params.bank = 1;
            params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
            params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
            params.addr = 0;
            params.misc = 0;
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (cenv == env) {
                    continue;
                }
                params.env = env;
                /* Bug fix: schedule the injection on the CPU being
                   injected (env), matching params.env, instead of on the
                   originating CPU (cenv); do_inject_x86_mce() must run in
                   the context of the CPU whose state it touches. */
                run_on_cpu(env, do_inject_x86_mce, &params);
            }
        }
    }
}
1214
#endif /* !CONFIG_USER_ONLY */
1215

    
1216
/* Enable the machine-check architecture for a freshly created CPU when the
 * CPUID model is family >= 6 and advertises both MCE and MCA.  All bank
 * control registers (MCi_CTL) start with every error source enabled. */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;
    unsigned int family = (cenv->cpuid_version >> 8) & 0xf;

    if (family < 6) {
        return;
    }
    if ((cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
        != (CPUID_MCE | CPUID_MCA)) {
        return;
    }

    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
        cenv->mce_banks[bank * 4] = ~(uint64_t)0; /* MCi_CTL */
    }
}
1230

    
1231
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1232
                            target_ulong *base, unsigned int *limit,
1233
                            unsigned int *flags)
1234
{
1235
    SegmentCache *dt;
1236
    target_ulong ptr;
1237
    uint32_t e1, e2;
1238
    int index;
1239

    
1240
    if (selector & 0x4)
1241
        dt = &env->ldt;
1242
    else
1243
        dt = &env->gdt;
1244
    index = selector & ~7;
1245
    ptr = dt->base + index;
1246
    if ((index + 7) > dt->limit
1247
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1248
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1249
        return 0;
1250

    
1251
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1252
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1253
    if (e2 & DESC_G_MASK)
1254
        *limit = (*limit << 12) | 0xfff;
1255
    *flags = e2;
1256

    
1257
    return 1;
1258
}
1259

    
1260
CPUX86State *cpu_x86_init(const char *cpu_model)
1261
{
1262
    CPUX86State *env;
1263
    static int inited;
1264

    
1265
    env = qemu_mallocz(sizeof(CPUX86State));
1266
    cpu_exec_init(env);
1267
    env->cpu_model_str = cpu_model;
1268

    
1269
    /* init various static tables */
1270
    if (!inited) {
1271
        inited = 1;
1272
        optimize_flags_init();
1273
#ifndef CONFIG_USER_ONLY
1274
        prev_debug_excp_handler =
1275
            cpu_set_debug_excp_handler(breakpoint_handler);
1276
#endif
1277
    }
1278
    if (cpu_x86_register(env, cpu_model) < 0) {
1279
        cpu_x86_close(env);
1280
        return NULL;
1281
    }
1282
    mce_init(env);
1283

    
1284
    qemu_init_vcpu(env);
1285

    
1286
    return env;
1287
}
1288

    
1289
#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU and its local APIC, but preserve a
   pending SIPI request so the AP can still be started; non-BSP CPUs are
   left halted until they receive a SIPI. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

/* Handle a SIPI: let the local APIC start the CPU. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
/* User-mode emulation: INIT/SIPI have no effect. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif