/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
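/* Note: cc_op_str is indexed directly with env->cc_op in cpu_dump_state()
   below, so the strings are expected to stay in the same order as the
   CC_OP_* enumeration (see cpu.h). */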

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
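/* is_write1 encodes the access type: 0 = data read, 1 = data write,
   2 = instruction fetch.  Only its low bit is used as the page-fault
   "write" flag; the value 2 is what enables the NX checks below. */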
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
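
/* Debug variant of the walk above: translates a virtual address to a
   physical one without setting accessed/dirty bits and without raising
   a page fault; it simply returns -1 if the address is not mapped. */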
target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
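
/* The DR7 R/W field decoded by hw_breakpoint_type() selects the kind of
   debug register breakpoint: 0 = instruction execution, 1 = data write,
   2 = I/O access (not supported here), 3 = data read or write. */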
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

static void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(env, EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUX86State *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(cenv, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    TranslationBlock *tb;

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        tb = tb_find_pc(env->mem_io_pc);
        cpu_restore_state(tb, env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
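
/* Fetch a segment descriptor for debugging purposes: bit 2 of the
   selector chooses the LDT or GDT, e1/e2 are the two 32-bit descriptor
   words, and the limit is scaled up when the granularity bit is set. */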
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

X86CPU *cpu_x86_init(const char *cpu_model)
{
    X86CPU *cpu;
    CPUX86State *env;
    static int inited;

    cpu = X86_CPU(object_new(TYPE_X86_CPU));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(cpu, cpu_model) < 0) {
        object_delete(OBJECT(cpu));
        return NULL;
    }

    x86_cpu_realize(OBJECT(cpu), NULL);

    return cpu;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(CPU(cpu));
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif