Statistics
| Branch: | Revision:

root / target-i386 / helper.c @ c31da136

History | View | Annotate | Download (40.5 kB)

1
/*
2
 *  i386 helpers (without register variable usage)
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include <stdarg.h>
20
#include <stdlib.h>
21
#include <stdio.h>
22
#include <string.h>
23
#include <inttypes.h>
24
#include <signal.h>
25

    
26
#include "cpu.h"
27
#include "exec-all.h"
28
#include "qemu-common.h"
29
#include "kvm.h"
30
#ifndef CONFIG_USER_ONLY
31
#include "sysemu.h"
32
#include "monitor.h"
33
#endif
34

    
35
//#define DEBUG_MMU
36

    
37
/* NOTE: must be called outside the CPU execute loop */
38
void cpu_reset(CPUX86State *env)
{
    /* DS/ES/SS/FS/GS all get the same flat real-mode data setup;
       keep the load order identical to the architectural reset path. */
    static const int data_seg[] = { R_DS, R_ES, R_SS, R_FS, R_GS };
    int n;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* wipe everything up to (but excluding) the breakpoint lists,
       which must survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the reset vector segment (base 0xffff0000) */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    for (n = 0; n < 5; n++) {
        cpu_x86_load_seg_cache(env, data_seg[n], 0, 0, 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK);
    }

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: mark every stack slot empty, default control word */
    for (n = 0; n < 8; n++) {
        env->fptags[n] = 1;
    }
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
110

    
111
/* Release the storage owned by a CPU state object. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
115

    
116
/* Extract the CPU family and model fields from the CPUID version word.
 * Either output pointer being NULL turns the call into a no-op. */
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int ver = env->cpuid_version;

    if (!family || !model) {
        return;
    }

    /* base family in bits 11:8; extended model (bits 19:16) is folded
       into the high nibble of the model value */
    *family = (ver >> 8) & 0x0f;
    *model = ((ver >> 12) & 0xf0) + ((ver >> 4) & 0x0f);
}
127

    
128
/* Broadcast MCA signal for processor version 06H_EH and above */
129
/* Broadcast MCA signal for processor version 06H_EH and above:
 * returns 1 when the emulated CPU is family 6 model >= 14, or any
 * later family; 0 otherwise. */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0, model = 0;

    cpu_x86_version(env, &family, &model);
    return (family > 6 || (family == 6 && model >= 14)) ? 1 : 0;
}
141

    
142
/***********************************************************/
143
/* x86 debug */
144

    
145
/* Human-readable names of the lazy condition-code states, printed by
 * cpu_dump_state() when X86_DUMP_CCOP is requested.  The entry order
 * MUST match the CC_OP_* enum: two special states, then one group of
 * four entries (B/W/L/Q operand widths) per arithmetic class. */
static const char *cc_op_str[] = {
    "DYNAMIC",  /* flags must be computed dynamically */
    "EFLAGS",   /* flags already materialized */

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
199

    
200
/* Print one segment-cache entry: selector, base, limit and raw flags,
 * followed (in protected mode, for present descriptors only) by a
 * decoded view of the descriptor attributes.  Always ends the line. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment active: print the full 64-bit base */
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* no attribute decoding in real mode or for non-present segments */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code or data descriptor */
        if (sc->flags & DESC_CS_MASK) {
            /* code: width tag plus Conforming/Readable/Accessed bits */
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            /* data: width tag plus Expand-down/Writable/Accessed bits */
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system descriptor: name the type field; the table differs
           between legacy and long mode */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
254

    
255
#define DUMP_CODE_BYTES_TOTAL    50
256
#define DUMP_CODE_BYTES_BACKWARD 20
257

    
258
/* Dump the full architectural CPU state to stream f via cpu_fprintf:
 * general registers and flags, segment caches, descriptor tables,
 * control/debug registers, and — depending on the flags argument —
 * the lazy condition codes (X86_DUMP_CCOP), the FPU/SSE state
 * (X86_DUMP_FPU) and a disassembly window of raw code bytes around
 * EIP (CPU_DUMP_CODE). */
void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    /* make sure env reflects the latest state (e.g. when running under KVM) */
    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit mode: print all 16 general registers at full width */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* legacy/compat mode: 32-bit view of the first 8 registers */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    /* segment caches, then the two system segment registers */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit table bases and CR2/CR3/DR values */
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* show the lazy condition-code state by name when in range */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the packed tag word: bit set = register valid */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        /* FSW is shown with the TOP field spliced into bits 13:11 */
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        /* x87 registers, two per output line */
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump raw code bytes around EIP, clamping the backward reach
           so we never read below the segment start */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                /* unreadable byte (e.g. unmapped page) */
                snprintf(codestr, sizeof(codestr), "??");
            }
            /* the byte at EIP itself is bracketed <xx> */
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
451

    
452
/***********************************************************/
453
/* x86 mmu */
454
/* XXX: add PGE support */
455

    
456
/* Set the state of the A20 address line gate.  A no-op when the
 * requested state matches the current one. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    int enabled = (a20_state != 0);

    if (enabled == ((env->a20_mask >> 20) & 1)) {
        return;
    }
#if defined(DEBUG_MMU)
    printf("A20 update: a20=%d\n", enabled);
#endif
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

    /* when a20 is changed, all the MMU mappings are invalid, so
       we must flush everything */
    tlb_flush(env, 1);
    env->a20_mask = ~(1 << 20) | (enabled << 20);
}
473

    
474
/* Install a new CR0 value: flush the TLB when a paging-relevant bit
 * changes, handle long-mode entry/exit via the PG bit, and refresh
 * the hidden hflags bits derived from CR0 (PE, ADDSEG, MP/EM/TS). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* changing PG, WP or PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    /* enabling paging with EFER.LME set activates long mode (LMA);
       disabling paging with LMA set deactivates it */
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 on this CPU model */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode
       (pe_state ^ 1 relies on CR0_PE_MASK being bit 0) */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: the shift lines CR0.MP/EM/TS (bits 1-3) up
       with HF_MP/HF_EM/HF_TS in hflags in a single operation */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
514

    
515
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
516
   the PDPT */
517
/* Install a new page-table base.  With paging enabled this invalidates
 * all non-global cached translations. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: nothing cached to invalidate */
        return;
    }
#if defined(DEBUG_MMU)
    printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
    tlb_flush(env, 0);
}
527

    
528
/* Install a new CR4 value: flush the TLB when a paging-relevant bit
 * changes, force OSFXSR off when the CPU model lacks SSE, and mirror
 * OSFXSR into the hidden hflags. */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    /* Log the incoming value (env->cr[4] still holds the old one here);
       previously this printed the stale register, unlike the CR0/CR3
       debug messages which report the new value. */
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    /* changing PGE, PAE or PSE invalidates all cached translations */
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling: OSFXSR cannot be set on a CPU without SSE */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
547

    
548
#if defined(CONFIG_USER_ONLY)
549

    
550
/* User-mode emulation: there is no MMU, so any fault that reaches us
 * is reported as a user-level page fault at the faulting address.
 * Always returns 1 (generate #PF). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    env->cr[2] = addr;
    env->error_code = ((is_write & 1) << PG_ERROR_W_BIT) | PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
561

    
562
#else
563

    
564
/* XXX: This value should match the one returned by CPUID
565
 * and in exec.c */
566
# if defined(TARGET_X86_64)
567
# define PHYS_ADDR_MASK 0xfffffff000LL
568
# else
569
# define PHYS_ADDR_MASK 0xffffff000LL
570
# endif
571

    
572
/* return value:
573
   -1 = cannot handle fault
574
   0  = nothing more to do
575
   1  = generate PF fault
576
*/
577
/* Walk the page tables for a guest access and install the resulting
 * translation in the QEMU TLB.  Handles all three paging shapes:
 * no paging, PAE/long mode (64-bit entries), and legacy 2-level
 * 32-bit paging — including 2MB/4MB large pages and Accessed/Dirty
 * bit updates.  is_write1 is 0 for reads, 1 for writes, 2 for
 * instruction fetches. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* collapse fetch (2) into read for the permission checks below;
       is_write1 keeps the original value for NX/error-code handling */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            /* level 4: PML4 entry */
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit fault */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            /* accumulate permissions; NX is inverted so that AND-ing
               levels yields "executable only if no level forbids it" */
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries carry no permissions */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* start from "everything allowed" (NX inverted) */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        /* page directory entry */
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* un-invert: ptep now has NX in its architectural sense */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honor RW only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 2-level 32-bit paging */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* assemble the architectural #PF error code */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    /* instruction-fetch bit only exists with NXE + PAE */
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
866

    
867
/* Translate a guest virtual address to a guest physical address for the
 * debugger (gdbstub/monitor), walking the page tables in software.
 * Unlike the MMU fault path this never touches the TLB, never sets
 * accessed/dirty bits and never raises a fault: it returns -1 on any
 * translation failure (non-canonical address or non-present entry).
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE paging: 64-bit entries */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* long mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: 4-entry PDPT indexed by bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 32-bit paging (or paging disabled) */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging off: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE) */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* combine the frame address with the offset within the (possibly
       large) page */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
960

    
961
/* Materialize debug register DR<index> as a QEMU breakpoint or
 * watchpoint, according to the type field encoded in DR7.  On failure
 * the cached breakpoint pointer is cleared so it is never dangling.
 */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int err = 0;
    int wp_flags = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case 1:
        /* data write breakpoint */
        wp_flags = BP_CPU | BP_MEM_WRITE;
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write breakpoint */
        wp_flags = BP_CPU | BP_MEM_ACCESS;
        break;
    }
    if (wp_flags) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    wp_flags, &env->cpu_watchpoint[index]);
    }
    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
988

    
989
/* Tear down whatever hw_breakpoint_insert() installed for DR<index>.
 * A NULL cached pointer means nothing was installed (or insertion
 * failed), so there is nothing to undo.
 */
void hw_breakpoint_remove(CPUState *env, int index)
{
    int bp_type;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    bp_type = hw_breakpoint_type(env->dr[7], index);
    if (bp_type == 0) {
        /* instruction breakpoint */
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
    } else if (bp_type == 1 || bp_type == 3) {
        /* data write / data access watchpoint */
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
    } else {
        /* bp_type == 2: no support for I/O watchpoints yet */
    }
}
1007

    
1008
/* Scan DR0-DR3 for breakpoints/watchpoints that just triggered and
 * compute the corresponding DR6 status bits.  DR6 is written back when
 * an *enabled* breakpoint hit, or unconditionally when
 * @force_dr6_update is set.  Returns nonzero iff an enabled breakpoint
 * fired (i.e. a #DB should be raised).
 */
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong new_dr6 = env->dr[6] & ~0xf;
    int hit_enabled = 0;
    int idx;

    for (idx = 0; idx < 4; idx++) {
        int bp_type = hw_breakpoint_type(env->dr[7], idx);
        int triggered;

        if (bp_type == 0) {
            /* instruction breakpoint: compare against current EIP */
            triggered = (env->dr[idx] == env->eip);
        } else {
            /* data breakpoint: did the associated watchpoint fire? */
            triggered = (bp_type & 1) && env->cpu_watchpoint[idx] &&
                (env->cpu_watchpoint[idx]->flags & BP_WATCHPOINT_HIT);
        }
        if (triggered) {
            new_dr6 |= 1 << idx;
            if (hw_breakpoint_enabled(env->dr[7], idx)) {
                hit_enabled = 1;
            }
        }
    }
    if (hit_enabled || force_dr6_update) {
        env->dr[6] = new_dr6;
    }
    return hit_enabled;
}
1029

    
1030
/* Debug-exception handler that was registered before ours; chained to
   from breakpoint_handler() so earlier hooks still run. */
static CPUDebugExcpHandler *prev_debug_excp_handler;
1031

    
1032
/* Defined in op_helper.c; raises a CPU exception and longjmps back to
   the execution loop (does not return). */
void raise_exception_env(int exception_index, CPUState *env);
1033

    
1034
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
 * Decides whether a watchpoint/breakpoint hit that belongs to the
 * *guest* (BP_CPU, i.e. its debug registers) should turn into a guest
 * #DB, and chains to the previously installed handler so gdbstub
 * breakpoints keep working.
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* data watchpoint path */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                /* enabled guest watchpoint: deliver #DB (does not return) */
                raise_exception_env(EXCP01_DB, env);
            else
                /* disabled in DR7: silently resume execution */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* instruction breakpoint path: find the one at the current EIP */
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* guest DRx breakpoint: update DR6 and raise #DB */
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    /* chain to the handler we displaced (e.g. gdbstub's) */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1059

    
1060
/* Arguments for do_inject_x86_mce(), passed through run_on_cpu() so the
   injection executes in a vCPU context. */
typedef struct MCEInjectionParams {
    Monitor *mon;         /* monitor for error reporting */
    CPUState *env;        /* CPU whose MCA banks are written */
    int bank;             /* target MCA bank index */
    uint64_t status;      /* value for MCi_STATUS */
    uint64_t mcg_status;  /* value for MCG_STATUS */
    uint64_t addr;        /* value for MCi_ADDR */
    uint64_t misc;        /* value for MCi_MISC */
    int flags;            /* MCE_INJECT_* flags */
} MCEInjectionParams;
1070

    
1071
/* run_on_cpu() worker: write a machine-check event into the MCA bank
 * registers of params->env and, for uncorrected errors, raise
 * CPU_INTERRUPT_MCE.  Mirrors the architectural MCA rules: MCG_CTL /
 * MCi_CTL gating, overflow (OVER) accounting, and triple fault when a
 * second #MC arrives while one is still in progress.
 */
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    /* each bank occupies 4 consecutive MSR slots: CTL, STATUS, ADDR, MISC */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        /* a #MC while MCIP is set (or with CR4.MCE clear) shuts down
           the machine: request a system reset */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            /* bank already holds a valid event: flag the overflow */
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: only record it if the bank is empty or holds
           another corrected error; no interrupt is raised */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* bank holds an uncorrected event: just mark the overflow */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1143

    
1144
/* Monitor entry point for machine-check injection.
 *
 * Validates the request (MCA supported, bank in range, STATUS has the
 * VAL bit, broadcast supported when requested) and then schedules
 * do_inject_x86_mce() on the target vCPU via run_on_cpu().  With
 * MCE_INJECT_BROADCAST, a generic UC event is additionally delivered to
 * every other CPU, emulating a machine-wide #MC broadcast.
 *
 * @mon:        monitor used for error messages
 * @cenv:       primary target CPU
 * @bank:       MCA bank index on @cenv
 * @status/@mcg_status/@addr/@misc: raw MSR values to inject
 * @flags:      MCE_INJECT_* flags
 */
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        /* deliver a generic uncorrected event to all other CPUs */
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            /* Fix: run the worker on the CPU being injected (env), not
               on the originating CPU (cenv); otherwise every broadcast
               injection mutates another CPU's state from cenv's thread,
               racing with that vCPU. */
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}
1195
#endif /* !CONFIG_USER_ONLY */
1196

    
1197
/* Advertise machine-check architecture support to the guest if the CPU
 * model qualifies (family >= 6 with both CPUID MCE and MCA bits):
 * set MCG_CAP/MCG_CTL defaults and enable every default bank's CTL MSR.
 */
static void mce_init(CPUX86State *cenv)
{
    unsigned int i;
    unsigned int family = (cenv->cpuid_version >> 8) & 0xf;

    if (family < 6) {
        return;
    }
    if ((cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
        != (CPUID_MCE | CPUID_MCA)) {
        return;
    }

    cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
    cenv->mcg_ctl = ~(uint64_t)0;
    for (i = 0; i < MCE_BANKS_DEF; i++) {
        /* slot 0 of each 4-MSR bank is MCi_CTL: all-ones = fully enabled */
        cenv->mce_banks[i * 4] = ~(uint64_t)0;
    }
}
1211

    
1212
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1213
                            target_ulong *base, unsigned int *limit,
1214
                            unsigned int *flags)
1215
{
1216
    SegmentCache *dt;
1217
    target_ulong ptr;
1218
    uint32_t e1, e2;
1219
    int index;
1220

    
1221
    if (selector & 0x4)
1222
        dt = &env->ldt;
1223
    else
1224
        dt = &env->gdt;
1225
    index = selector & ~7;
1226
    ptr = dt->base + index;
1227
    if ((index + 7) > dt->limit
1228
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1229
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1230
        return 0;
1231

    
1232
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1233
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1234
    if (e2 & DESC_G_MASK)
1235
        *limit = (*limit << 12) | 0xfff;
1236
    *flags = e2;
1237

    
1238
    return 1;
1239
}
1240

    
1241
CPUX86State *cpu_x86_init(const char *cpu_model)
1242
{
1243
    CPUX86State *env;
1244
    static int inited;
1245

    
1246
    env = qemu_mallocz(sizeof(CPUX86State));
1247
    cpu_exec_init(env);
1248
    env->cpu_model_str = cpu_model;
1249

    
1250
    /* init various static tables */
1251
    if (!inited) {
1252
        inited = 1;
1253
        optimize_flags_init();
1254
#ifndef CONFIG_USER_ONLY
1255
        prev_debug_excp_handler =
1256
            cpu_set_debug_excp_handler(breakpoint_handler);
1257
#endif
1258
    }
1259
    if (cpu_x86_register(env, cpu_model) < 0) {
1260
        cpu_x86_close(env);
1261
        return NULL;
1262
    }
1263
    mce_init(env);
1264

    
1265
    qemu_init_vcpu(env);
1266

    
1267
    return env;
1268
}
1269

    
1270
#if !defined(CONFIG_USER_ONLY)
1271
/* Handle an INIT IPI: reset the CPU while preserving the state the
 * architecture keeps across INIT — a pending SIPI request and the PAT
 * MSR — then reset the local APIC and halt unless this is the BSP.
 */
void do_cpu_init(CPUState *env)
{
    /* state that must survive the reset */
    int pending_sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t saved_pat = env->pat;

    cpu_reset(env);
    env->interrupt_request = pending_sipi;
    env->pat = saved_pat;
    apic_init_reset(env->apic_state);
    /* APs wait halted for a SIPI; only the bootstrap CPU keeps running */
    env->halted = !cpu_is_bsp(env);
}
1282

    
1283
/* Handle a Startup IPI by forwarding it to the local APIC model. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
1287
#else
1288
/* User-mode emulation: INIT/SIPI are system-level events, so these are
   empty stubs. */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
1294
#endif